Bug 1564873 - Stop using mem::uninitialized to pass memory to the GPU. r=Gankro

Use Vec::reserve + as_ptr, then raw pointers.

An alternative would be to require T: Default and push T::default() instead.

Differential Revision: https://phabricator.services.mozilla.com/D53360

--HG--
extra : moz-landing-system : lando
This commit is contained in:
Emilio Cobos Álvarez 2019-11-18 21:40:39 +00:00
Parent 25af4e72b1
Commit bfc655e919
2 changed files with 37 additions and 29 deletions

View file

@ -3534,7 +3534,8 @@ impl<'a, T> TextureUploader<'a, T> {
layer_index: i32,
stride: Option<i32>,
format_override: Option<ImageFormat>,
data: &[T],
data: *const T,
len: usize,
) -> usize {
// Textures dimensions may have been clamped by the hardware. Crop the
// upload region to match.
@ -3557,7 +3558,7 @@ impl<'a, T> TextureUploader<'a, T> {
stride as usize
});
let src_size = (rect.size.height as usize - 1) * src_stride + width_bytes;
assert!(src_size <= data.len() * mem::size_of::<T>());
assert!(src_size <= len * mem::size_of::<T>());
// for optimal PBO texture uploads the stride of the data in
// the buffer may have to be a multiple of a certain value.
@ -3591,15 +3592,12 @@ impl<'a, T> TextureUploader<'a, T> {
if src_stride == dst_stride {
// the stride is already optimal, so simply copy
// the data as-is in to the buffer
let elem_count = src_size / mem::size_of::<T>();
assert_eq!(elem_count * mem::size_of::<T>(), src_size);
let slice = &data[.. elem_count];
gl::buffer_sub_data(
self.target.gl,
assert_eq!(src_size % mem::size_of::<T>(), 0);
self.target.gl.buffer_sub_data_untyped(
gl::PIXEL_UNPACK_BUFFER,
buffer.size_used as _,
slice,
buffer.size_used as isize,
src_size as isize,
data as *const _,
);
} else {
// copy the data line-by-line in to the buffer so
@ -3608,11 +3606,12 @@ impl<'a, T> TextureUploader<'a, T> {
gl::PIXEL_UNPACK_BUFFER,
buffer.size_used as _,
dst_size as _,
gl::MAP_WRITE_BIT | gl::MAP_INVALIDATE_RANGE_BIT);
gl::MAP_WRITE_BIT | gl::MAP_INVALIDATE_RANGE_BIT,
);
unsafe {
let src: &[u8] = slice::from_raw_parts(data.as_ptr() as *const u8, src_size);
let dst: &mut [u8] = slice::from_raw_parts_mut(ptr as *mut u8, dst_size);
let src: &[mem::MaybeUninit<u8>] = slice::from_raw_parts(data as *const _, src_size);
let dst: &mut [mem::MaybeUninit<u8>] = slice::from_raw_parts_mut(ptr as *mut _, dst_size);
for y in 0..rect.size.height as usize {
let src_start = y * src_stride;
@ -3641,7 +3640,7 @@ impl<'a, T> TextureUploader<'a, T> {
rect,
layer_index,
stride,
offset: data.as_ptr() as _,
offset: data as _,
format_override,
});
}

View file

@ -1535,7 +1535,7 @@ impl GpuCacheTexture {
DeviceIntSize::new(MAX_VERTEX_TEXTURE_WIDTH as i32, 1),
);
uploader.upload(rect, 0, None, None, &*row.cpu_blocks);
uploader.upload(rect, 0, None, None, row.cpu_blocks.as_ptr(), row.cpu_blocks.len());
row.is_dirty = false;
}
@ -1595,25 +1595,29 @@ impl<T> VertexDataTexture<T> {
debug_assert!(mem::size_of::<T>() % 16 == 0);
let texels_per_item = mem::size_of::<T>() / 16;
let items_per_row = MAX_VERTEX_TEXTURE_WIDTH / texels_per_item;
debug_assert_ne!(items_per_row, 0);
// Ensure we always end up with a texture when leaving this method.
if data.is_empty() {
let mut len = data.len();
if len == 0 {
if self.texture.is_some() {
return;
}
data.push(unsafe { mem::uninitialized() });
}
// Extend the data array to be a multiple of the row size.
// This ensures memory safety when the array is passed to
// OpenGL to upload to the GPU.
if items_per_row != 0 {
while data.len() % items_per_row != 0 {
data.push(unsafe { mem::uninitialized() });
data.reserve(items_per_row);
len = items_per_row;
} else {
// Extend the data array to have enough capacity to upload at least
// a multiple of the row size. This ensures memory safety when the
// array is passed to OpenGL to upload to the GPU.
let extra = len % items_per_row;
if extra != 0 {
let padding = items_per_row - extra;
data.reserve(padding);
len += padding;
}
}
let needed_height = (data.len() / items_per_row) as i32;
let needed_height = (len / items_per_row) as i32;
let existing_height = self.texture.as_ref().map_or(0, |t| t.get_dimensions().height);
// Create a new texture if needed.
@ -1657,9 +1661,11 @@ impl<T> VertexDataTexture<T> {
DeviceIntPoint::zero(),
DeviceIntSize::new(logical_width, needed_height),
);
debug_assert!(len <= data.capacity(), "CPU copy will read out of bounds");
device
.upload_texture(self.texture(), &self.pbo, 0)
.upload(rect, 0, None, None, data);
.upload(rect, 0, None, None, data.as_ptr(), len);
}
fn deinit(mut self, device: &mut Device) {
@ -3455,12 +3461,14 @@ impl Renderer {
&self.texture_cache_upload_pbo,
0,
);
let data = &data[offset as usize ..];
uploader.upload(
rect,
layer_index,
stride,
format_override,
&data[offset as usize ..],
data.as_ptr(),
data.len(),
)
}
TextureUpdateSource::External { id, channel_index } => {
@ -3497,7 +3505,8 @@ impl Renderer {
layer_index,
stride,
format_override,
data,
data.as_ptr(),
data.len()
);
handler.unlock(id, channel_index);
size