Bug 1506206 - Update webrender to commit 9e65149a487afa35994076d7db918c07d2b967bf (WR PR #3293). r=kats

Differential Revision: https://phabricator.services.mozilla.com/D11544

--HG--
extra : moz-landing-system : lando
Author: WR Updater Bot
Date: 2018-11-10 01:37:27 +00:00
Parent 6e620ffbcc
Commit 844addb852
6 changed files with 298 additions and 294 deletions

View file

@@ -818,6 +818,7 @@ pub struct Device {
resource_override_path: Option<PathBuf>,
max_texture_size: u32,
max_texture_layers: u32,
renderer_name: String,
cached_programs: Option<Rc<ProgramCache>>,
@@ -903,10 +904,13 @@ impl Device {
cached_programs: Option<Rc<ProgramCache>>,
) -> Device {
let mut max_texture_size = [0];
let mut max_texture_layers = [0];
unsafe {
gl.get_integer_v(gl::MAX_TEXTURE_SIZE, &mut max_texture_size);
gl.get_integer_v(gl::MAX_ARRAY_TEXTURE_LAYERS, &mut max_texture_layers);
}
let max_texture_size = max_texture_size[0] as u32;
let max_texture_layers = max_texture_layers[0] as u32;
let renderer_name = gl.get_string(gl::RENDERER);
let mut extension_count = [0];
@@ -1022,6 +1026,7 @@ impl Device {
depth_available: true,
max_texture_size,
max_texture_layers,
renderer_name,
cached_programs,
frame_id: GpuFrameId(0),
@@ -1046,10 +1051,23 @@ impl Device {
self.cached_programs = Some(cached_programs);
}
/// Ensures that the maximum texture size is less than or equal to the
/// provided value. If the provided value exceeds the limit reported by
/// the driver, the driver's limit is kept.
pub fn clamp_max_texture_size(&mut self, size: u32) {
self.max_texture_size = self.max_texture_size.min(size);
}
/// Returns the limit on texture dimensions (width or height).
pub fn max_texture_size(&self) -> u32 {
self.max_texture_size
}
/// Returns the limit on texture array layers.
pub fn max_texture_layers(&self) -> usize {
self.max_texture_layers as usize
}
#[cfg(feature = "debug_renderer")]
pub fn get_capabilities(&self) -> &Capabilities {
&self.capabilities
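
For reference, the clamp is one-directional: it can lower the effective limit below what the driver reports, but never raise it. A minimal sketch of that semantics, using a hypothetical stub rather than the real Device (which needs a live GL context to construct):

struct DeviceStub { max_texture_size: u32 }

impl DeviceStub {
    // Mirrors clamp_max_texture_size above: keep the smaller of the
    // driver-reported limit and the caller-supplied limit.
    fn clamp_max_texture_size(&mut self, size: u32) {
        self.max_texture_size = self.max_texture_size.min(size);
    }
}

fn main() {
    let mut device = DeviceStub { max_texture_size: 16384 }; // driver value
    device.clamp_max_texture_size(8192);  // lowering takes effect
    assert_eq!(device.max_texture_size, 8192);
    device.clamp_max_texture_size(32768); // raising is ignored
    assert_eq!(device.max_texture_size, 8192);
}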

View file

@@ -732,7 +732,7 @@ mod test_glyph_rasterizer {
let mut glyph_rasterizer = GlyphRasterizer::new(workers).unwrap();
let mut glyph_cache = GlyphCache::new();
let mut gpu_cache = GpuCache::new();
let mut texture_cache = TextureCache::new(2048);
let mut texture_cache = TextureCache::new(2048, 1024);
let mut render_task_cache = RenderTaskCache::new();
let mut render_task_tree = RenderTaskTree::new(FrameId::invalid());
let mut special_render_passes = SpecialRenderPasses::new(&DeviceIntSize::new(1366, 768));
@@ -786,7 +786,7 @@ mod test_glyph_rasterizer {
glyph_rasterizer.resolve_glyphs(
&mut glyph_cache,
&mut TextureCache::new(4096),
&mut TextureCache::new(4096, 1024),
&mut gpu_cache,
&mut render_task_cache,
&mut render_task_tree,

View file

@@ -1525,7 +1525,6 @@ pub struct Renderer {
pub gpu_glyph_renderer: GpuGlyphRenderer,
max_texture_size: u32,
max_recorded_profiles: usize,
clear_color: Option<ColorF>,
@@ -1669,25 +1668,23 @@ impl Renderer {
device.supports_extension("GL_ARB_blend_func_extended") &&
device.supports_extension("GL_ARB_explicit_attrib_location");
let device_max_size = device.max_texture_size();
// 512 is the minimum that the texture cache can work with.
// Broken GL contexts can return a max texture size of zero (See #1260). Better to
// gracefully fail now than panic as soon as a texture is allocated.
let min_texture_size = 512;
if device_max_size < min_texture_size {
const MIN_TEXTURE_SIZE: u32 = 512;
if let Some(user_limit) = options.max_texture_size {
assert!(user_limit >= MIN_TEXTURE_SIZE);
device.clamp_max_texture_size(user_limit);
}
if device.max_texture_size() < MIN_TEXTURE_SIZE {
// Broken GL contexts can return a max texture size of zero (See #1260).
// Better to gracefully fail now than panic as soon as a texture is allocated.
error!(
"Device reporting insufficient max texture size ({})",
device_max_size
device.max_texture_size()
);
return Err(RendererError::MaxTextureSize);
}
let max_device_size = cmp::max(
cmp::min(
device_max_size,
options.max_texture_size.unwrap_or(device_max_size),
),
min_texture_size,
);
let max_texture_size = device.max_texture_size();
let max_texture_layers = device.max_texture_layers();
register_thread_with_profiler("Compositor".to_owned());
@@ -1936,7 +1933,11 @@ impl Renderer {
thread_listener.thread_started(&rb_thread_name);
}
let texture_cache = TextureCache::new(max_device_size);
let texture_cache = TextureCache::new(
max_texture_size,
max_texture_layers,
);
let resource_cache = ResourceCache::new(
texture_cache,
glyph_rasterizer,
@@ -1992,7 +1993,6 @@ impl Renderer {
new_scene_indicator: ChangeIndicator::new(),
#[cfg(feature = "debug_renderer")]
slow_frame_indicator: ChangeIndicator::new(),
max_texture_size: max_device_size,
max_recorded_profiles: options.max_recorded_profiles,
clear_color: options.clear_color,
enable_clear_scissor: options.enable_clear_scissor,
@@ -2041,7 +2041,7 @@ impl Renderer {
}
pub fn get_max_texture_size(&self) -> u32 {
self.max_texture_size
self.device.max_texture_size()
}
pub fn get_graphics_api_info(&self) -> GraphicsApiInfo {
@@ -2751,7 +2751,7 @@ impl Renderer {
(count + list.blocks.len(), cmp::max(height, list.height))
});
if max_requested_height > self.max_texture_size && !self.gpu_cache_overflow {
if max_requested_height > self.get_max_texture_size() && !self.gpu_cache_overflow {
self.gpu_cache_overflow = true;
self.renderer_errors.push(RendererError::MaxTextureSize);
}
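
The pattern across the hunks above: the Renderer no longer caches max_texture_size, and get_max_texture_size() reads through the Device, so a clamp applied at construction (or any later clamp) is observed everywhere, including the GPU-cache overflow check. A hedged sketch of the delegation with hypothetical stubs, not the real types:

struct DeviceStub { limit: u32 }

impl DeviceStub {
    fn max_texture_size(&self) -> u32 { self.limit }
    fn clamp_max_texture_size(&mut self, size: u32) {
        self.limit = self.limit.min(size);
    }
}

struct RendererStub { device: DeviceStub }

impl RendererStub {
    // Delegates instead of returning a possibly stale cached copy.
    fn get_max_texture_size(&self) -> u32 { self.device.max_texture_size() }
}

fn main() {
    let mut r = RendererStub { device: DeviceStub { limit: 16384 } };
    r.device.clamp_max_texture_size(8192);
    assert_eq!(r.get_max_texture_size(), 8192); // clamp is visible here too
}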

View file

@@ -2037,8 +2037,10 @@ impl ResourceCache {
self.cached_glyph_dimensions.clear();
self.cached_images.clear();
self.cached_render_tasks.clear();
let max_texture_size = self.texture_cache.max_texture_size();
self.texture_cache = TextureCache::new(max_texture_size);
self.texture_cache = TextureCache::new(
self.texture_cache.max_texture_size(),
self.texture_cache.max_texture_layers(),
);
}
}

View file

@@ -22,12 +22,12 @@ use std::rc::Rc;
/// The size of each region/layer in shared cache texture arrays.
const TEXTURE_REGION_DIMENSIONS: u32 = 512;
// Items in the texture cache can either be standalone textures,
// or a sub-rect inside the shared cache.
/// Items in the texture cache can either be standalone textures,
/// or a sub-rect inside the shared cache.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
enum EntryKind {
enum EntryDetails {
Standalone,
Cache {
// Origin within the texture layer where this item exists.
@@ -37,13 +37,23 @@ enum EntryKind {
},
}
impl EntryKind {
/// Returns true if this corresponds to a standalone cache entry.
fn is_standalone(&self) -> bool {
matches!(*self, EntryKind::Standalone)
impl EntryDetails {
/// Returns the kind associated with the details.
fn kind(&self) -> EntryKind {
match *self {
EntryDetails::Standalone => EntryKind::Standalone,
EntryDetails::Cache { .. } => EntryKind::Shared,
}
}
}
/// Tag identifying standalone-versus-shared, without the details.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum EntryKind {
Standalone,
Shared,
}
#[derive(Debug)]
pub enum CacheEntryMarker {}
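
The rename from EntryKind to EntryDetails, together with the new Copy tag enum, is the usual Rust split between a data-carrying enum and a cheap discriminant used for bookkeeping. A self-contained sketch of the pattern with generic names (not the real cache types):

#[derive(Debug)]
enum Details {
    Standalone,
    Cache { layer_index: usize },
}

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum Kind { Standalone, Shared }

impl Details {
    // Project the discriminant out of the data-carrying enum.
    fn kind(&self) -> Kind {
        match *self {
            Details::Standalone => Kind::Standalone,
            Details::Cache { .. } => Kind::Shared,
        }
    }
}

fn main() {
    let details = Details::Cache { layer_index: 3 };
    // The tag is Copy + Eq, so it can be stored and compared freely.
    assert_eq!(details.kind(), Kind::Shared);
}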
@@ -57,7 +67,7 @@ struct CacheEntry {
/// Size of the requested item, in device pixels.
size: DeviceUintSize,
/// Details specific to standalone or shared items.
kind: EntryKind,
details: EntryDetails,
/// Arbitrary user data associated with this item.
user_data: [f32; 3],
/// The last frame this item was requested for rendering.
@@ -81,24 +91,20 @@ impl CacheEntry {
// Create a new entry for a standalone texture.
fn new_standalone(
texture_id: CacheTextureId,
size: DeviceUintSize,
format: ImageFormat,
filter: TextureFilter,
user_data: [f32; 3],
last_access: FrameId,
uv_rect_kind: UvRectKind,
params: &CacheAllocParams,
) -> Self {
CacheEntry {
size,
user_data,
size: params.descriptor.size,
user_data: params.user_data,
last_access,
kind: EntryKind::Standalone,
details: EntryDetails::Standalone,
texture_id,
format,
filter,
format: params.descriptor.format,
filter: params.filter,
uv_rect_handle: GpuCacheHandle::new(),
eviction_notice: None,
uv_rect_kind,
uv_rect_kind: params.uv_rect_kind,
eviction: Eviction::Auto,
}
}
@@ -109,9 +115,9 @@ impl CacheEntry {
// to fetch from.
fn update_gpu_cache(&mut self, gpu_cache: &mut GpuCache) {
if let Some(mut request) = gpu_cache.request(&mut self.uv_rect_handle) {
let (origin, layer_index) = match self.kind {
EntryKind::Standalone { .. } => (DeviceUintPoint::zero(), 0.0),
EntryKind::Cache {
let (origin, layer_index) = match self.details {
EntryDetails::Standalone { .. } => (DeviceUintPoint::zero(), 0.0),
EntryDetails::Cache {
origin,
layer_index,
..
@@ -210,32 +216,24 @@ impl SharedTextures {
array_a8_linear: TextureArray::new(
ImageFormat::R8,
TextureFilter::Linear,
4,
),
// Used for experimental hdr yuv texture support, but not used in
// production Firefox.
array_a16_linear: TextureArray::new(
ImageFormat::R16,
TextureFilter::Linear,
4,
),
// The primary cache for images, glyphs, etc.
array_rgba8_linear: TextureArray::new(
ImageFormat::BGRA8,
TextureFilter::Linear,
32, /* More than 32 layers breaks on mac intel drivers for some reason */
),
// Used for image-rendering: crisp. This is mostly favicons, which
// are small. Some other images use it too, but those tend to be
// larger than 512x512 and thus don't use the shared cache anyway.
//
// Even though most of the buckets will be sparsely-used, we still
// need a few to account for different favicon sizes. 4 seems enough
// in practice, though we might also be able to get away with 2 or 3.
array_rgba8_nearest: TextureArray::new(
ImageFormat::BGRA8,
TextureFilter::Nearest,
4,
),
}
}
@@ -260,6 +258,38 @@ impl SharedTextures {
}
}
/// Lists of strong handles owned by the texture cache. There is only one strong
/// handle for each entry, but unlimited weak handles. Consumers receive the weak
/// handles, and `TextureCache` owns the strong handles internally.
#[derive(Default)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct EntryHandles {
/// Handles for each standalone texture cache entry.
standalone: Vec<FreeListHandle<CacheEntryMarker>>,
/// Handles for each shared texture cache entry.
shared: Vec<FreeListHandle<CacheEntryMarker>>,
}
impl EntryHandles {
/// Mutably borrows the requested handle list.
fn select(&mut self, kind: EntryKind) -> &mut Vec<FreeListHandle<CacheEntryMarker>> {
if kind == EntryKind::Standalone {
&mut self.standalone
} else {
&mut self.shared
}
}
}
/// Container struct for the various parameters used in cache allocation.
struct CacheAllocParams {
descriptor: ImageDescriptor,
filter: TextureFilter,
user_data: [f32; 3],
uv_rect_kind: UvRectKind,
}
/// General-purpose manager for images in GPU memory. This includes images,
/// rasterized glyphs, rasterized blobs, cached render tasks, etc.
///
@@ -283,53 +313,76 @@ pub struct TextureCache {
/// Set of texture arrays in different formats used for the shared cache.
shared_textures: SharedTextures,
// Maximum texture size supported by hardware.
/// Maximum texture size supported by hardware.
max_texture_size: u32,
// The next unused virtual texture ID. Monotonically increasing.
/// Maximum number of texture layers supported by hardware.
max_texture_layers: usize,
/// The next unused virtual texture ID. Monotonically increasing.
next_id: CacheTextureId,
// A list of allocations and updates that need to be
// applied to the texture cache in the rendering thread
// this frame.
/// A list of allocations and updates that need to be applied to the texture
/// cache in the rendering thread this frame.
#[cfg_attr(all(feature = "serde", any(feature = "capture", feature = "replay")), serde(skip))]
pending_updates: TextureUpdateList,
// The current frame ID. Used for cache eviction policies.
/// The current frame ID. Used for cache eviction policies.
frame_id: FrameId,
// Maintains the list of all current items in
// the texture cache.
/// The last FrameId in which we expired the shared cache.
last_shared_cache_expiration: FrameId,
/// Maintains the list of all current items in the texture cache.
entries: FreeList<CacheEntry, CacheEntryMarker>,
// A list of the strong handles of items that were
// allocated in the standalone texture pool. Used
// for evicting old standalone textures.
standalone_entry_handles: Vec<FreeListHandle<CacheEntryMarker>>,
// A list of the strong handles of items that were
// allocated in the shared texture cache. Used
// for evicting old cache items.
shared_entry_handles: Vec<FreeListHandle<CacheEntryMarker>>,
/// Strong handles for all entries allocated from the above `FreeList`.
handles: EntryHandles,
}
impl TextureCache {
pub fn new(max_texture_size: u32) -> Self {
pub fn new(max_texture_size: u32, mut max_texture_layers: usize) -> Self {
if cfg!(target_os = "macos") {
// On MBP integrated Intel GPUs, texture arrays appear to be
// implemented as a single texture of stacked layers, and that
// texture appears to be subject to the texture size limit. As such,
// allocating more than 32 512x512 regions results in a dimension
// longer than 16k (the max texture size), causing incorrect behavior.
//
// So we clamp the number of layers on mac. This results in maximum
// texture array size of 32MB, which isn't ideal but isn't terrible
// either. OpenGL on mac is not long for this earth, so this may be
// good enough until we have WebRender on gfx-rs (on Metal).
//
// Note that we could also define this more generally in terms of
// |max_texture_size / TEXTURE_REGION_DIMENSION|, except:
// * max_texture_size is actually clamped beyond the device limit
// by Gecko to 8192, so we'd need to thread the raw device value
// here, and:
// * The bug we're working around is likely specific to a single
// driver family, and those drivers are also likely to share
// the same max texture size of 16k. If we do encounter a driver
// with the same bug but a lower max texture size, we might need
// to rethink our strategy anyway, since a limit below 32MB might
// start to introduce performance issues.
max_texture_layers = max_texture_layers.min(32);
}
TextureCache {
shared_textures: SharedTextures::new(),
max_texture_size,
max_texture_layers,
next_id: CacheTextureId(1),
pending_updates: TextureUpdateList::new(),
frame_id: FrameId::invalid(),
last_shared_cache_expiration: FrameId::invalid(),
entries: FreeList::new(),
standalone_entry_handles: Vec::new(),
shared_entry_handles: Vec::new(),
handles: EntryHandles::default(),
}
}
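
The figures in the comment above can be checked directly: 32 stacked 512×512 regions hit exactly the 16k dimension limit, and a 32-layer BGRA8 array costs 32 MiB. A small verification sketch (assuming 4 bytes per pixel for BGRA8):

// Worked numbers behind the mac layer clamp (4 bytes/pixel for BGRA8).
const REGION_DIM: u64 = 512;                 // TEXTURE_REGION_DIMENSIONS
const MAC_LAYER_CAP: u64 = 32;

fn main() {
    assert_eq!(MAC_LAYER_CAP * REGION_DIM, 16384);          // == 16k limit
    let bytes_per_layer = REGION_DIM * REGION_DIM * 4;      // 1 MiB
    assert_eq!(MAC_LAYER_CAP * bytes_per_layer, 32 * 1024 * 1024); // 32 MiB
}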
pub fn clear(&mut self) {
let standalone_entry_handles = mem::replace(
&mut self.standalone_entry_handles,
&mut self.handles.standalone,
Vec::new(),
);
@@ -340,7 +393,7 @@ impl TextureCache {
}
let shared_entry_handles = mem::replace(
&mut self.shared_entry_handles,
&mut self.handles.shared,
Vec::new(),
);
@@ -404,6 +457,11 @@ impl TextureCache {
self.max_texture_size
}
#[allow(dead_code)]
pub fn max_texture_layers(&self) -> usize {
self.max_texture_layers
}
pub fn pending_updates(&mut self) -> TextureUpdateList {
mem::replace(&mut self.pending_updates, TextureUpdateList::new())
}
@@ -439,13 +497,8 @@ impl TextureCache {
};
if realloc {
self.allocate(
handle,
descriptor,
filter,
user_data,
uv_rect_kind,
);
let params = CacheAllocParams { descriptor, filter, user_data, uv_rect_kind };
self.allocate(&params, handle);
// If we reallocated, we need to upload the whole item again.
dirty_rect = None;
@@ -471,9 +524,9 @@ impl TextureCache {
// to upload the new image data into the correct location
// in GPU memory.
if let Some(data) = data {
let (layer_index, origin) = match entry.kind {
EntryKind::Standalone { .. } => (0, DeviceUintPoint::zero()),
EntryKind::Cache {
let (layer_index, origin) = match entry.details {
EntryDetails::Standalone { .. } => (0, DeviceUintPoint::zero()),
EntryDetails::Cache {
layer_index,
origin,
..
@@ -519,11 +572,11 @@ impl TextureCache {
.get_opt(handle)
.expect("BUG: was dropped from cache or not updated!");
debug_assert_eq!(entry.last_access, self.frame_id);
let (layer_index, origin) = match entry.kind {
EntryKind::Standalone { .. } => {
let (layer_index, origin) = match entry.details {
EntryDetails::Standalone { .. } => {
(0, DeviceUintPoint::zero())
}
EntryKind::Cache {
EntryDetails::Cache {
layer_index,
origin,
..
@@ -549,11 +602,11 @@ impl TextureCache {
.get_opt(handle)
.expect("BUG: was dropped from cache or not updated!");
debug_assert_eq!(entry.last_access, self.frame_id);
let (layer_index, origin) = match entry.kind {
EntryKind::Standalone { .. } => {
let (layer_index, origin) = match entry.details {
EntryDetails::Standalone { .. } => {
(0, DeviceUintPoint::zero())
}
EntryKind::Cache {
EntryDetails::Cache {
layer_index,
origin,
..
@@ -574,20 +627,25 @@ impl TextureCache {
}
/// Expires old standalone textures. Called at the end of every frame.
///
/// Most of the time, standalone cache entries correspond to images whose
/// width or height is greater than the region size in the shared cache, i.e.
/// 512 pixels. Cached render tasks also frequently get standalone entries,
/// but those use the Eviction::Eager policy (for now). So the tradeoff here
/// is largely around reducing texture upload jank while keeping memory usage
/// at an acceptable level.
fn expire_old_standalone_entries(&mut self) {
self.expire_old_entries(EntryKind::Standalone);
}
/// Shared eviction code for standalone and shared entries.
///
/// Our eviction scheme is based on the age of the entry, i.e. the difference
/// between the current frame index and that of the last frame in
/// which the entry was used. It does not directly consider the size of the
/// entry, but does consider overall memory usage by WebRender, by making
/// eviction increasingly aggressive as overall memory usage increases.
fn expire_old_standalone_entries(&mut self) {
///
/// Most of the time, standalone cache entries correspond to images whose
/// width or height is greater than the region size in the shared cache, i.e.
/// 512 pixels. Cached render tasks also frequently get standalone entries,
/// but those use the Eviction::Eager policy (for now). So the tradeoff there
/// is largely around reducing texture upload jank while keeping memory usage
/// at an acceptable level.
fn expire_old_entries(&mut self, kind: EntryKind) {
// These parameters are based on some discussion and local tuning, but
// no hard measurement. There may be room for improvement.
//
@@ -600,7 +658,7 @@ impl TextureCache {
(total_gpu_bytes_allocated() as f64 / MAX_MEMORY_PRESSURE_BYTES as f64).min(1.0);
// Use the pressure factor to compute the maximum number of frames that
// a standalone texture can go unused before being evicted.
// an entry can go unused before being evicted.
let max_frame_age_raw =
((1.0 - pressure_factor) * MAX_FRAME_AGE_WITHOUT_PRESSURE) as usize;
@@ -614,9 +672,9 @@ impl TextureCache {
// Iterate over the entries in reverse order, evicting the ones older than
// the frame age threshold. Reverse order avoids iterator invalidation when
// removing entries.
for i in (0..self.standalone_entry_handles.len()).rev() {
for i in (0..self.handles.select(kind).len()).rev() {
let evict = {
let entry = self.entries.get(&self.standalone_entry_handles[i]);
let entry = self.entries.get(&self.handles.select(kind)[i]);
match entry.eviction {
Eviction::Manual => false,
Eviction::Auto => entry.last_access < frame_id_threshold,
@@ -624,7 +682,7 @@ impl TextureCache {
}
};
if evict {
let handle = self.standalone_entry_handles.swap_remove(i);
let handle = self.handles.select(kind).swap_remove(i);
let entry = self.entries.free(handle);
entry.evict();
self.free(entry);
@@ -632,74 +690,26 @@ impl TextureCache {
}
}
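
The constants MAX_MEMORY_PRESSURE_BYTES and MAX_FRAME_AGE_WITHOUT_PRESSURE are defined earlier in this file and don't appear in the diff, so the sketch below uses placeholder values; only the shape of the formula is taken from the code above (the real function may clamp the result further past the hunk boundary):

// Illustrative eviction-age curve: the idle window shrinks linearly as
// GPU memory pressure rises, making eviction increasingly aggressive.
const MAX_MEMORY_PRESSURE_BYTES: f64 = 500.0 * 1024.0 * 1024.0; // placeholder
const MAX_FRAME_AGE_WITHOUT_PRESSURE: f64 = 75.0;               // placeholder

fn max_frame_age(gpu_bytes_allocated: f64) -> usize {
    let pressure_factor =
        (gpu_bytes_allocated / MAX_MEMORY_PRESSURE_BYTES).min(1.0);
    ((1.0 - pressure_factor) * MAX_FRAME_AGE_WITHOUT_PRESSURE) as usize
}

fn main() {
    assert_eq!(max_frame_age(0.0), 75);                      // no pressure
    assert_eq!(max_frame_age(250.0 * 1024.0 * 1024.0), 37);  // half pressure
    assert_eq!(max_frame_age(2000.0 * 1024.0 * 1024.0), 0);  // saturated
}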
// Expire old shared items. Pass in the allocation size
// that is being requested, so we know when we've evicted
// enough items to guarantee we can fit this allocation in
// the cache.
fn expire_old_shared_entries(&mut self, required_alloc: &ImageDescriptor) {
let mut eviction_candidates = Vec::new();
let mut retained_entries = Vec::new();
// Build a list of eviction candidates (which are
// anything not used this frame).
for handle in self.shared_entry_handles.drain(..) {
let entry = self.entries.get(&handle);
if entry.eviction == Eviction::Manual || entry.last_access == self.frame_id {
retained_entries.push(handle);
} else {
eviction_candidates.push(handle);
}
/// Expires old shared entries, if we haven't done so recently.
///
/// Returns true if any entries were expired.
fn maybe_expire_old_shared_entries(&mut self) -> bool {
let old_len = self.handles.shared.len();
if self.last_shared_cache_expiration + 25 < self.frame_id {
self.expire_old_entries(EntryKind::Shared);
self.last_shared_cache_expiration = self.frame_id;
}
// Sort by access time so we remove the oldest ones first.
eviction_candidates.sort_by_key(|handle| {
let entry = self.entries.get(handle);
entry.last_access
});
// Doing an eviction is quite expensive, so we don't want to
// do it all the time. To avoid this, try and evict a
// significant number of items each cycle. However, we don't
// want to evict everything we can, since that will result in
// more items being uploaded than necessary.
// Instead, we say we will keep evicting until both of these
// conditions are met:
// - We have evicted some arbitrary number of items (512 currently).
// AND
// - We have freed an item that will definitely allow us to
// fit the currently requested allocation.
let needed_slab_size = SlabSize::new(required_alloc.size);
let mut found_matching_slab = false;
let mut freed_complete_page = false;
let mut evicted_items = 0;
for handle in eviction_candidates {
if evicted_items > 512 && (found_matching_slab || freed_complete_page) {
retained_entries.push(handle);
} else {
let entry = self.entries.free(handle);
entry.evict();
if let Some(region) = self.free(entry) {
found_matching_slab |= region.slab_size == needed_slab_size;
freed_complete_page |= region.is_empty();
}
evicted_items += 1;
}
}
// Keep a record of the remaining handles for next eviction cycle.
self.shared_entry_handles = retained_entries;
self.handles.shared.len() != old_len
}
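
This replaces the old allocation-driven eviction pass with a throttled one: at most one expiration sweep every 25 frames, and the boolean return tells the caller whether retrying a failed shared-cache allocation is worthwhile. A generic sketch of the throttle, with hypothetical types and a stand-in retention policy:

// "GC at most every N frames": entries here are just last-access frame
// ids; the real code walks FreeList handles and honors Eviction::Manual.
struct ThrottledCache {
    last_expiration: u64,  // frame id of the last sweep
    entries: Vec<u64>,     // last-access frame id per entry (stand-in)
}

impl ThrottledCache {
    // Sweeps at most once every 25 frames. Returns true if anything was
    // evicted, so the caller knows a retry may now succeed.
    fn maybe_expire(&mut self, frame_id: u64) -> bool {
        let old_len = self.entries.len();
        if self.last_expiration + 25 < frame_id {
            // Stand-in policy: evict entries idle for 25+ frames.
            self.entries.retain(|&last| last + 25 >= frame_id);
            self.last_expiration = frame_id;
        }
        self.entries.len() != old_len
    }
}

fn main() {
    let mut cache = ThrottledCache { last_expiration: 0, entries: vec![1, 90, 99] };
    assert!(cache.maybe_expire(100));   // evicts the frame-1 entry
    assert!(!cache.maybe_expire(101));  // throttled: swept one frame ago
}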
// Free a cache entry from the standalone list or shared cache.
fn free(&mut self, entry: CacheEntry) -> Option<&TextureRegion> {
match entry.kind {
EntryKind::Standalone { .. } => {
fn free(&mut self, entry: CacheEntry) {
match entry.details {
EntryDetails::Standalone { .. } => {
// This is a standalone texture allocation. Free it directly.
self.pending_updates.push_free(entry.texture_id);
None
}
EntryKind::Cache {
EntryDetails::Cache {
origin,
layer_index,
} => {
@@ -710,7 +720,6 @@ impl TextureCache {
layer_index,
);
region.free(origin);
Some(region)
}
}
}
@@ -718,13 +727,13 @@ impl TextureCache {
// Attempt to allocate a block from the shared cache.
fn allocate_from_shared_cache(
&mut self,
descriptor: &ImageDescriptor,
filter: TextureFilter,
user_data: [f32; 3],
uv_rect_kind: UvRectKind,
params: &CacheAllocParams
) -> Option<CacheEntry> {
// Mutably borrow the correct texture.
let texture_array = self.shared_textures.select(descriptor.format, filter);
let texture_array = self.shared_textures.select(
params.descriptor.format,
params.filter,
);
// Lazy initialize this texture array if required.
if texture_array.texture_id.is_none() {
@@ -735,7 +744,7 @@
let info = TextureCacheAllocInfo {
width: TEXTURE_REGION_DIMENSIONS,
height: TEXTURE_REGION_DIMENSIONS,
format: descriptor.format,
format: params.descriptor.format,
filter: texture_array.filter,
layer_count: 1,
};
@@ -747,12 +756,7 @@
// Do the allocation. This can fail and return None
// if there are no free slots or regions available.
texture_array.alloc(
descriptor.size,
user_data,
self.frame_id,
uv_rect_kind,
)
texture_array.alloc(params, self.frame_id)
}
// Returns true if the given image descriptor *may* be
@@ -784,108 +788,97 @@
allowed_in_shared_cache
}
// Allocate storage for a given image. This attempts to allocate
// from the shared cache, but falls back to standalone texture
// if the image is too large, or the cache is full.
fn allocate(
/// Allocates a new standalone cache entry.
fn allocate_standalone_entry(
&mut self,
handle: &mut TextureCacheHandle,
descriptor: ImageDescriptor,
filter: TextureFilter,
user_data: [f32; 3],
uv_rect_kind: UvRectKind,
) {
assert!(descriptor.size.width > 0 && descriptor.size.height > 0);
params: &CacheAllocParams,
) -> CacheEntry {
let texture_id = self.next_id;
self.next_id.0 += 1;
// Work out if this image qualifies to go in the shared (batching) cache.
let allowed_in_shared_cache = self.is_allowed_in_shared_cache(
filter,
&descriptor,
// Push a command to allocate device storage of the right size / format.
let info = TextureCacheAllocInfo {
width: params.descriptor.size.width,
height: params.descriptor.size.height,
format: params.descriptor.format,
filter: params.filter,
layer_count: 1,
};
self.pending_updates.push_alloc(texture_id, info);
return CacheEntry::new_standalone(
texture_id,
self.frame_id,
params,
);
let mut allocated_standalone = false;
let mut new_cache_entry = None;
let frame_id = self.frame_id;
}
// If it's allowed in the cache, see if there is a spot for it.
if allowed_in_shared_cache {
/// Allocates a cache entry appropriate for the given parameters.
///
/// This allocates from the shared cache unless the parameters do not meet
/// the shared cache requirements, in which case a standalone texture is
/// used.
fn allocate_cache_entry(
&mut self,
params: &CacheAllocParams,
) -> CacheEntry {
assert!(params.descriptor.size.width > 0 && params.descriptor.size.height > 0);
new_cache_entry = self.allocate_from_shared_cache(
&descriptor,
filter,
user_data,
uv_rect_kind,
);
// If this image doesn't qualify to go in the shared (batching) cache,
// allocate a standalone entry.
if !self.is_allowed_in_shared_cache(params.filter, &params.descriptor) {
return self.allocate_standalone_entry(params);
}
// If we failed to allocate in the shared cache, make some space
// and then try to allocate again. If we still have room to grow
// the cache, we do that. Otherwise, we evict.
//
// We should improve this logic to support some degree of eviction
// before the cache fills up entirely.
if new_cache_entry.is_none() {
let reallocated = {
let texture_array = self.shared_textures.select(descriptor.format, filter);
let num_regions = texture_array.regions.len();
if num_regions < texture_array.max_layer_count {
// We have room to grow.
let info = TextureCacheAllocInfo {
width: TEXTURE_REGION_DIMENSIONS,
height: TEXTURE_REGION_DIMENSIONS,
format: descriptor.format,
filter: texture_array.filter,
layer_count: (num_regions + 1) as i32,
};
self.pending_updates.push_realloc(texture_array.texture_id.unwrap(), info);
texture_array.regions.push(TextureRegion::new(num_regions));
true
} else {
false
}
};
if !reallocated {
// Out of room. Evict.
self.expire_old_shared_entries(&descriptor);
}
new_cache_entry = self.allocate_from_shared_cache(
&descriptor,
filter,
user_data,
uv_rect_kind,
);
// Try allocating from the shared cache.
if let Some(entry) = self.allocate_from_shared_cache(params) {
return entry;
}
// If we failed to allocate and haven't GCed in a while, do so and try
// again.
if self.maybe_expire_old_shared_entries() {
if let Some(entry) = self.allocate_from_shared_cache(params) {
return entry;
}
}
// If not allowed in the cache, or if the shared cache is full, then it
// will just have to be in a unique texture. This hurts batching but should
// only occur on a small number of images (or pathological test cases!).
if new_cache_entry.is_none() {
let texture_id = self.next_id;
self.next_id.0 += 1;
let added_layer = {
// If we've hit our layer limit, allocate standalone.
let texture_array =
self.shared_textures.select(params.descriptor.format, params.filter);
let num_regions = texture_array.regions.len();
// Add a layer, unless we've hit our limit.
if num_regions < self.max_texture_layers as usize {
let info = TextureCacheAllocInfo {
width: TEXTURE_REGION_DIMENSIONS,
height: TEXTURE_REGION_DIMENSIONS,
format: params.descriptor.format,
filter: texture_array.filter,
layer_count: (num_regions + 1) as i32,
};
self.pending_updates.push_realloc(texture_array.texture_id.unwrap(), info);
texture_array.regions.push(TextureRegion::new(num_regions));
true
} else {
false
}
};
// Push a command to allocate device storage of the right size / format.
let info = TextureCacheAllocInfo {
width: descriptor.size.width,
height: descriptor.size.height,
format: descriptor.format,
filter,
layer_count: 1,
};
self.pending_updates.push_alloc(texture_id, info);
new_cache_entry = Some(CacheEntry::new_standalone(
texture_id,
descriptor.size,
descriptor.format,
filter,
user_data,
frame_id,
uv_rect_kind,
));
allocated_standalone = true;
if added_layer {
self.allocate_from_shared_cache(params)
.expect("Allocation should succeed after adding a fresh layer")
} else {
self.allocate_standalone_entry(params)
}
let new_cache_entry = new_cache_entry.expect("BUG: must have allocated by now");
}
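
Read top to bottom, allocate_cache_entry is now a straight fallback chain: shared cache, then a throttled GC plus retry, then growing the array by one layer (after which allocation must succeed), and finally a standalone texture. A compact restatement with hypothetical stubs, not the real TextureCache signatures:

struct Entry;

// Stub cache: the four flags stand in for real allocator state.
struct CacheStub { allowed_shared: bool, shared_full: bool, can_gc: bool, can_grow: bool }

impl CacheStub {
    fn try_shared(&mut self) -> Option<Entry> {
        if self.shared_full { None } else { Some(Entry) }
    }
    fn maybe_expire(&mut self) -> bool {
        if self.can_gc { self.shared_full = false; }
        self.can_gc
    }
    fn add_layer(&mut self) -> bool {
        if self.can_grow { self.shared_full = false; }
        self.can_grow
    }
    fn alloc_standalone(&mut self) -> Entry { Entry }

    fn allocate(&mut self) -> Entry {
        if !self.allowed_shared {
            return self.alloc_standalone();      // wrong size/format
        }
        if let Some(e) = self.try_shared() { return e; }
        if self.maybe_expire() {                 // freed something: retry
            if let Some(e) = self.try_shared() { return e; }
        }
        if self.add_layer() {
            self.try_shared().expect("fresh layer must have room")
        } else {
            self.alloc_standalone()              // out of layers
        }
    }
}

fn main() {
    let mut c = CacheStub { allowed_shared: true, shared_full: true, can_gc: false, can_grow: true };
    let _entry = c.allocate(); // full cache, GC throttled -> grows one layer
}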
/// Allocates a cache entry for the given parameters, and updates the
/// provided handle to point to the new entry.
fn allocate(&mut self, params: &CacheAllocParams, handle: &mut TextureCacheHandle) {
let new_cache_entry = self.allocate_cache_entry(params);
let new_kind = new_cache_entry.details.kind();
// If the handle points to a valid cache entry, we want to replace the
// cache entry with our newly updated location. We also need to ensure
@@ -898,14 +891,14 @@ impl TextureCache {
// This is managed with a database style upsert operation.
match self.entries.upsert(handle, new_cache_entry) {
UpsertResult::Updated(old_entry) => {
if allocated_standalone != old_entry.kind.is_standalone() {
if new_kind != old_entry.details.kind() {
// Handle the rare case that an update moves an entry from
// shared to standalone or vice versa. This involves a linear
// search, but should be rare enough not to matter.
let (from, to) = if allocated_standalone {
(&mut self.shared_entry_handles, &mut self.standalone_entry_handles)
let (from, to) = if new_kind == EntryKind::Standalone {
(&mut self.handles.shared, &mut self.handles.standalone)
} else {
(&mut self.standalone_entry_handles, &mut self.shared_entry_handles)
(&mut self.handles.standalone, &mut self.handles.shared)
};
let idx = from.iter().position(|h| h.weak() == *handle).unwrap();
to.push(from.remove(idx));
@@ -914,11 +907,7 @@ impl TextureCache {
}
UpsertResult::Inserted(new_handle) => {
*handle = new_handle.weak();
if allocated_standalone {
self.standalone_entry_handles.push(new_handle);
} else {
self.shared_entry_handles.push(new_handle);
}
self.handles.select(new_kind).push(new_handle);
}
}
}
@@ -1068,7 +1057,6 @@ impl TextureRegion {
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct TextureArray {
filter: TextureFilter,
max_layer_count: usize,
format: ImageFormat,
regions: Vec<TextureRegion>,
texture_id: Option<CacheTextureId>,
@@ -1078,12 +1066,10 @@ impl TextureArray {
fn new(
format: ImageFormat,
filter: TextureFilter,
max_layer_count: usize,
) -> Self {
TextureArray {
format,
filter,
max_layer_count,
regions: Vec::new(),
texture_id: None,
}
@@ -1107,17 +1093,15 @@ impl TextureArray {
}
}
// Allocate space in this texture array.
/// Allocate space in this texture array.
fn alloc(
&mut self,
size: DeviceUintSize,
user_data: [f32; 3],
params: &CacheAllocParams,
frame_id: FrameId,
uv_rect_kind: UvRectKind,
) -> Option<CacheEntry> {
// Quantize the size of the allocation to select a region to
// allocate from.
let slab_size = SlabSize::new(size);
let slab_size = SlabSize::new(params.descriptor.size);
// TODO(gw): For simplicity, the initial implementation just
// has a single vec<> of regions. We could easily
@@ -1128,7 +1112,7 @@
// in case we need to select a new empty region
// after the loop.
let mut empty_region_index = None;
let mut entry_kind = None;
let mut entry_details = None;
// Run through the existing regions of this size, and see if
// we can find a free block in any of them.
@@ -1137,7 +1121,7 @@
empty_region_index = Some(i);
} else if region.slab_size == slab_size {
if let Some(location) = region.alloc() {
entry_kind = Some(EntryKind::Cache {
entry_details = Some(EntryDetails::Cache {
layer_index: region.layer_index,
origin: location,
});
@@ -1147,12 +1131,12 @@
}
// Find a region of the right size and try to allocate from it.
if entry_kind.is_none() {
if entry_details.is_none() {
if let Some(empty_region_index) = empty_region_index {
let region = &mut self.regions[empty_region_index];
region.init(slab_size);
entry_kind = region.alloc().map(|location| {
EntryKind::Cache {
entry_details = region.alloc().map(|location| {
EntryDetails::Cache {
layer_index: region.layer_index,
origin: location,
}
@@ -1160,18 +1144,18 @@
}
}
entry_kind.map(|kind| {
entry_details.map(|details| {
CacheEntry {
size,
user_data,
size: params.descriptor.size,
user_data: params.user_data,
last_access: frame_id,
kind,
details,
uv_rect_handle: GpuCacheHandle::new(),
format: self.format,
filter: self.filter,
texture_id: self.texture_id.unwrap(),
eviction_notice: None,
uv_rect_kind,
uv_rect_kind: params.uv_rect_kind,
eviction: Eviction::Auto,
}
})
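
SlabSize::new, which isn't shown in this diff, quantizes the requested dimensions so that each 512×512 region only ever holds slabs of one uniform size. A hypothetical quantizer in the same spirit (the real bucket list lives elsewhere in texture_cache.rs and may differ):

// Round each dimension up to a fixed bucket; uniform slabs per region
// keep the free-list bookkeeping trivial.
fn quantize(dim: u32) -> u32 {
    [64, 128, 256, 512]
        .iter()
        .copied()
        .find(|&bucket| dim <= bucket)
        .unwrap_or(512) // larger items go to standalone textures anyway
}

fn slab_size(width: u32, height: u32) -> (u32, u32) {
    (quantize(width), quantize(height))
}

fn main() {
    assert_eq!(slab_size(20, 30), (64, 64));     // small glyphs share a bucket
    assert_eq!(slab_size(200, 120), (256, 128));
    assert_eq!(slab_size(512, 512), (512, 512)); // one slab fills a region
}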

View file

@@ -1 +1 @@
56ffe6edc4c453a370784314194f9516af0a8950
9e65149a487afa35994076d7db918c07d2b967bf