Bug 1674443 - Refactor the texture cache slab allocator. r=gw

This patch simplifies the slab allocator in various ways, most importantly by separating the packing logic from the texture cache glue (swizzling, cache entries, etc.). The former moves into TextureUnits/TextureUnit and the latter is mostly contained in TextureCache.

This patch should have no functional change. The goal is to make it easier to introduce custom slab sizes for glyphs in a follow-up patch, and later to use different packing algorithms.
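The new seam between the two layers is visible in TextureCache::do_allocate (taken from the diff below): TextureUnits hands back plain geometry and knows nothing about swizzling or CacheEntry construction, which stay in TextureCache.

    let (texture_id, region_index, allocated_rect) = units.allocate(
        params.descriptor.size,
        &mut self.pending_updates,
        &mut self.next_id,
    );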

Differential Revision: https://phabricator.services.mozilla.com/D95869
Nicolas Silva 2020-11-13 15:22:12 +00:00
Parent 31793c037c
Commit 6b4fae17f8
1 changed file: 180 additions and 182 deletions


@@ -7,7 +7,7 @@ use api::{DebugFlags, ImageDescriptor};
use api::units::*;
#[cfg(test)]
use api::{DocumentId, IdNamespace};
use euclid::point2;
use euclid::{point2, size2};
use crate::device::{TextureFilter, TextureFormatPair};
use crate::freelist::{FreeListHandle, WeakFreeListHandle};
use crate::gpu_cache::{GpuCache, GpuCacheHandle};
@@ -1028,14 +1028,20 @@ impl TextureCache {
&mut self,
params: &CacheAllocParams,
) -> CacheEntry {
// Mutably borrow the correct texture.
let texture_array = self.shared_textures.select(
let units = self.shared_textures.select(
params.descriptor.size,
params.descriptor.format,
params.filter,
params.shader,
);
let swizzle = if texture_array.formats.external == params.descriptor.format {
let (texture_id, region_index, allocated_rect) = units.allocate(
params.descriptor.size,
&mut self.pending_updates,
&mut self.next_id,
);
let swizzle = if units.formats.external == params.descriptor.format {
Swizzle::default()
} else {
match self.swizzle {
@@ -1044,49 +1050,26 @@
}
};
let slab_size = SlabSize::new(params.descriptor.size);
let bpp = units.formats.internal.bytes_per_pixel();
self.shared_bytes_allocated += (allocated_rect.size.area() * bpp) as usize;
let info = TextureCacheAllocInfo {
target: ImageBufferKind::Texture2D,
width: texture_array.size,
height: texture_array.size,
format: texture_array.formats.internal,
filter: texture_array.filter,
layer_count: 1,
is_shared_cache: true,
has_depth: false,
};
let unit_index = if let Some(index) = texture_array.units
.iter()
.position(|unit| unit.can_alloc(slab_size))
{
index
} else {
let index = texture_array.units.len();
let regions_per_row = texture_array.size / texture_array.region_size;
texture_array.units.push(TextureUnit {
texture_id: self.next_id,
regions_per_row,
regions: Vec::new(),
empty_regions: 0,
});
let num_regions = texture_array.regions_per_texture();
let unit = &mut texture_array.units[index];
unit.push_regions(num_regions, texture_array.region_size);
self.pending_updates.push_alloc(self.next_id, info);
self.next_id.0 += 1;
index
};
self.shared_bytes_allocated += slab_size.size_in_bytes(texture_array.formats.internal);
// Do the allocation. This can fail and return None
// if there are no free slots or regions available.
texture_array.alloc(params, unit_index, self.now, swizzle, params.shader)
CacheEntry {
size: params.descriptor.size,
user_data: params.user_data,
last_access: self.now,
details: EntryDetails::Cache {
origin: allocated_rect.origin,
region_index,
},
uv_rect_handle: GpuCacheHandle::new(),
input_format: params.descriptor.format,
filter: params.filter,
swizzle,
texture_id,
eviction_notice: None,
uv_rect_kind: params.uv_rect_kind,
shader: params.shader
}
}
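As a worked example of the new byte accounting (request size assumed for illustration): a 300x200 RGBA8 request quantizes to a 512x256 slab, so allocated_rect covers the whole slab and the cache books the same number of bytes the old SlabSize::size_in_bytes would have:

    let bpp = 4;                    // RGBA8 bytes per pixel
    let slab_area = 512 * 256;      // allocated_rect.size.area()
    assert_eq!(slab_area * bpp, 524_288); // added to shared_bytes_allocated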
// Returns true if the given image descriptor *may* be
@@ -1259,32 +1242,6 @@ struct SlabSize {
}
impl SlabSize {
fn new(size: DeviceIntSize) -> Self {
let x_size = quantize_dimension(size.width);
let y_size = quantize_dimension(size.height);
let (width, height) = match (x_size, y_size) {
// Special cased rectangular slab pages.
(512, 0..=64) => (512, 64),
(512, 128) => (512, 128),
(512, 256) => (512, 256),
(0..=64, 512) => (64, 512),
(128, 512) => (128, 512),
(256, 512) => (256, 512),
// If none of those fit, use a square slab size.
(x_size, y_size) => {
let square_size = cmp::max(x_size, y_size);
(square_size, square_size)
}
};
SlabSize {
width,
height,
}
}
fn size_in_bytes(&self, format: ImageFormat) -> usize {
let bpp = format.bytes_per_pixel();
(self.width * self.height * bpp) as usize
@@ -1398,39 +1355,99 @@ impl TextureRegion {
struct TextureUnit {
texture_id: CacheTextureId,
regions: Vec<TextureRegion>,
region_size: i32,
empty_regions: usize,
regions_per_row: i32,
}
impl TextureUnit {
/// Adds a new empty region to the array.
fn push_regions(&mut self, count: i32, region_size: i32) {
assert!(self.empty_regions <= self.regions.len());
for _ in 0..count {
let index = self.regions.len();
let offset = self.region_offset(index as i32, region_size);
self.regions.push(TextureRegion::new(index, offset));
self.empty_regions += 1;
fn slab_size(&self, size: DeviceIntSize) -> SlabSize {
fn quantize_dimension(size: i32) -> i32 {
match size {
0 => unreachable!(),
1..=16 => 16,
17..=32 => 32,
33..=64 => 64,
65..=128 => 128,
129..=256 => 256,
257..=512 => 512,
_ => panic!("Invalid dimensions for cache!"),
}
}
}
fn region_offset(&self, index: i32, region_size: i32) -> DeviceIntPoint {
point2(
(index % self.regions_per_row) * region_size,
(index / self.regions_per_row) * region_size,
)
}
/// Returns true if we can allocate the given entry.
fn can_alloc(&self, slab_size: SlabSize) -> bool {
self.empty_regions != 0 || self.regions.iter().any(|region| {
region.slab_size == slab_size && !region.free_slots.is_empty()
})
let x_size = quantize_dimension(size.width);
let y_size = quantize_dimension(size.height);
let (width, height) = match (x_size, y_size) {
// Special cased rectangular slab pages.
(512, 0..=64) => (512, 64),
(512, 128) => (512, 128),
(512, 256) => (512, 256),
(0..=64, 512) => (64, 512),
(128, 512) => (128, 512),
(256, 512) => (256, 512),
// If none of those fit, use a square slab size.
(x_size, y_size) => {
let square_size = cmp::max(x_size, y_size);
(square_size, square_size)
}
};
SlabSize {
width,
height,
}
}
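For instance (sizes hypothetical, following the match arms above):

    // 40x300 quantizes to (64, 512) and hits the rectangular (0..=64, 512) arm:
    let s = unit.slab_size(size2(40, 300));  // SlabSize { width: 64, height: 512 }
    // 100x90 quantizes to (128, 128); no rectangular arm matches, so the
    // larger quantized dimension wins and the slab is square:
    let s = unit.slab_size(size2(100, 90));  // SlabSize { width: 128, height: 128 }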
fn is_empty(&self) -> bool {
self.empty_regions == self.regions.len()
}
// Returns the region index and allocated rect.
fn allocate(&mut self, size: DeviceIntSize) -> Option<(usize, DeviceIntRect)> {
// Keep track of the location of an empty region,
// in case we need to select a new empty region
// after the loop.
let mut empty_region_index = None;
let slab_size = self.slab_size(size);
let allocated_size = size2(slab_size.width, slab_size.height);
// Run through the existing regions of this size, and see if
// we can find a free block in any of them.
for (i, region) in self.regions.iter_mut().enumerate() {
if region.is_empty() {
empty_region_index = Some(i);
} else if region.slab_size == slab_size {
if let Some(location) = region.alloc() {
return Some((
region.index,
DeviceIntRect {
origin: location,
size: allocated_size,
}
));
}
}
}
if let Some(empty_region_index) = empty_region_index {
let region = &mut self.regions[empty_region_index];
region.init(slab_size, self.region_size, &mut self.empty_regions);
return Some((
region.index,
DeviceIntRect {
origin: region.alloc().unwrap(),
size: allocated_size,
},
))
}
None
}
}
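Note the two-pass structure of allocate: the scan only takes slots from regions already initialized to the matching slab size, remembering any empty region it passes, and initializes one of those empty regions only when no initialized region has a free slot. A minimal usage sketch (the unit and sizes are hypothetical):

    // A 30x20 request rounds up to a 32x32 slab.
    if let Some((region_index, rect)) = unit.allocate(size2(30, 20)) {
        assert_eq!(rect.size, size2(32, 32)); // slab-sized, not request-sized
    }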
/// A number of 2D textures (single layer), each with a number of
@@ -1461,11 +1478,6 @@ impl TextureUnits {
}
}
fn regions_per_texture(&self) -> i32 {
let regions_per_row = self.size / self.region_size;
regions_per_row * regions_per_row
}
/// Returns the number of GPU bytes consumed by this texture array.
fn size_in_bytes(&self) -> usize {
let bpp = self.formats.internal.bytes_per_pixel() as usize;
@@ -1473,6 +1485,76 @@ impl TextureUnits {
num_regions * (self.region_size * self.region_size) as usize * bpp
}
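For concreteness (texture and region sizes assumed here, not taken from the patch): with 2048-pixel square textures, 512-pixel regions, and a 4-byte-per-pixel internal format, each texture contributes 16 regions and 16 MiB to this total:

    // Assumed values for illustration only:
    let (size, region_size, bpp) = (2048, 512, 4);
    let regions_per_texture = (size / region_size) * (size / region_size); // 16
    let bytes = regions_per_texture * region_size * region_size * bpp;     // 16 MiB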
fn add_texture(&mut self, texture_id: CacheTextureId) -> usize {
let regions_per_row = self.size / self.region_size;
let num_regions = (regions_per_row * regions_per_row) as usize;
let mut texture = TextureUnit {
texture_id,
regions: Vec::with_capacity(num_regions),
region_size: self.region_size,
empty_regions: num_regions,
};
for index in 0..num_regions {
let offset = point2(
(index as i32 % regions_per_row) * self.region_size,
(index as i32 / regions_per_row) * self.region_size,
);
texture.regions.push(TextureRegion::new(index, offset));
}
let unit_index = self.units.len();
self.units.push(texture);
unit_index
}
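The offset arithmetic lays the regions out row-major across the texture. For example, with regions_per_row = 4 and region_size = 512 (the same assumed values as above), index 5 lands one region in and one region down:

    let offset = point2(
        (5 % 4) * 512, // x = 512
        (5 / 4) * 512, // y = 512
    );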
/// Returns the texture id, region index and allocated rect.
///
/// Adds a new texture if there's no spot available.
fn allocate(
&mut self,
requested_size: DeviceIntSize,
pending_updates: &mut TextureUpdateList,
next_id: &mut CacheTextureId,
) -> (CacheTextureId, usize, DeviceIntRect) {
let mut allocation = None;
for unit in &mut self.units {
if let Some((region, rect)) = unit.allocate(requested_size) {
allocation = Some((unit.texture_id, region, rect));
// Stop at the first unit with room; continuing the scan would also
// claim (and leak) a slot in every later unit that fits the request.
break;
}
}
allocation.unwrap_or_else(|| {
let texture_id = *next_id;
next_id.0 += 1;
pending_updates.push_alloc(
texture_id,
TextureCacheAllocInfo {
target: ImageBufferKind::Texture2D,
width: self.size,
height: self.size,
format: self.formats.internal,
filter: self.filter,
layer_count: 1,
is_shared_cache: true,
has_depth: false,
},
);
let unit_index = self.add_texture(texture_id);
let (region_index, rect) = self.units[unit_index]
.allocate(requested_size)
.unwrap();
(texture_id, region_index, rect)
})
}
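One consequence of this design is that the cache grows one whole texture at a time, and a brand-new texture is guaranteed to satisfy the retry (hence the unwrap), since every one of its regions starts empty. A hypothetical first-use sketch, assuming CacheTextureId supports equality:

    let mut next_id = CacheTextureId(0);
    // The first allocation adds a texture and packs the slot into it...
    let (id_a, ..) = units.allocate(size2(32, 32), &mut updates, &mut next_id);
    // ...and the second reuses the same texture instead of adding another.
    let (id_b, ..) = units.allocate(size2(32, 32), &mut updates, &mut next_id);
    assert_eq!(id_a, id_b);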
fn clear(&mut self, updates: &mut TextureUpdateList) {
for unit in self.units.drain(..) {
updates.push_free(unit.texture_id);
@@ -1497,77 +1579,6 @@ impl TextureUnits {
profile.set(mem_idx, profiler::bytes_to_mb(self.size_in_bytes()));
}
/// Allocate space in this texture array.
fn alloc(
&mut self,
params: &CacheAllocParams,
unit_index: usize,
now: FrameStamp,
swizzle: Swizzle,
shader: TargetShader,
) -> CacheEntry {
// Quantize the size of the allocation to select a region to
// allocate from.
let slab_size = SlabSize::new(params.descriptor.size);
let unit = &mut self.units[unit_index];
// TODO(gw): For simplicity, the initial implementation just
// has a single vec<> of regions. We could easily
// make this more efficient by storing a list of
// regions for each slab size specifically...
// Keep track of the location of an empty region,
// in case we need to select a new empty region
// after the loop.
let mut empty_region_index = None;
let mut entry_details = None;
// Run through the existing regions of this size, and see if
// we can find a free block in any of them.
for (i, region) in unit.regions.iter_mut().enumerate() {
if region.is_empty() {
empty_region_index = Some(i);
} else if region.slab_size == slab_size {
if let Some(location) = region.alloc() {
entry_details = Some(EntryDetails::Cache {
region_index: region.index,
origin: location,
});
break;
}
}
}
// Find a region of the right size and try to allocate from it.
let details = match entry_details {
Some(details) => details,
None => {
let region = &mut unit.regions[empty_region_index.unwrap()];
region.init(slab_size, self.region_size, &mut unit.empty_regions);
EntryDetails::Cache {
region_index: region.index,
origin: region.alloc().unwrap(),
}
}
};
CacheEntry {
size: params.descriptor.size,
user_data: params.user_data,
last_access: now,
details,
uv_rect_handle: GpuCacheHandle::new(),
input_format: params.descriptor.format,
filter: self.filter,
swizzle,
texture_id: unit.texture_id,
eviction_notice: None,
uv_rect_kind: params.uv_rect_kind,
shader
}
}
#[allow(dead_code)]
pub fn dump_as_svg(&self, output: &mut dyn std::io::Write) -> std::io::Result<()> {
use svg_fmt::*;
@@ -1802,16 +1813,3 @@ impl TextureCacheUpdate {
}
}
}
fn quantize_dimension(size: i32) -> i32 {
match size {
0 => unreachable!(),
1..=16 => 16,
17..=32 => 32,
33..=64 => 64,
65..=128 => 128,
129..=256 => 256,
257..=512 => 512,
_ => panic!("Invalid dimensions for cache!"),
}
}