Bug 1704911 - Fix Shmem circulation for buffers that are both mappable and mapped at creation r=jgilbert

We are now passing an extra flag on Unmap to keep the shmem around.
Previously, we would accidentally free the Shmem for buffers that are writable and mapped at creation.
We also add a bit of logging on the WebGPUParent side.
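
The gist: Buffer::Unmap now forwards the buffer's mMappable bit as the new keepShmem argument, and WebGPUParent::RecvBufferUnmap uses it to decide whether to hold on to the shmem for later maps or to deallocate it. A condensed sketch of the resulting logic (a paraphrase of the hunks below, not literal patch lines):

  // Child side (Buffer::Unmap): a mappable buffer asks the parent to keep the shmem alive.
  mParent->UnmapBuffer(mId, std::move(mMapped->mShmem), mMapped->mWritable,
                       /* keepShmem */ mMappable);

  // Parent side (RecvBufferUnmap): keep, remember, or free the shmem.
  const auto iter = mSharedMemoryMap.find(aSelfId);
  if (iter != mSharedMemoryMap.end()) {
    iter->second = aShmem;               // already tracked: refresh the entry
  } else if (aKeepShmem) {
    mSharedMemoryMap[aSelfId] = aShmem;  // mappable and mapped at creation: keep it
  } else {
    DeallocShmem(aShmem);                // no mapping flags: safe to free
  }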

Differential Revision: https://phabricator.services.mozilla.com/D112264
Dzmitry Malyshau 2021-04-16 22:32:44 +00:00
Parent d55d0ec5db
Commit 2e592c660f
7 changed files with 46 additions and 16 deletions

View file

@@ -42,8 +42,9 @@ NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(Buffer)
 }
 NS_IMPL_CYCLE_COLLECTION_TRACE_END
-Buffer::Buffer(Device* const aParent, RawId aId, BufferAddress aSize)
-    : ChildOf(aParent), mId(aId), mSize(aSize) {
+Buffer::Buffer(Device* const aParent, RawId aId, BufferAddress aSize,
+               bool aMappable)
+    : ChildOf(aParent), mId(aId), mSize(aSize), mMappable(aMappable) {
   mozilla::HoldJSObjects(this);
 }
@@ -83,6 +84,10 @@ already_AddRefed<dom::Promise> Buffer::MapAsync(
     aRv.ThrowInvalidStateError("Unable to map a buffer that is already mapped");
     return nullptr;
   }
+  if (!mMappable) {
+    aRv.ThrowInvalidStateError("Unable to map a buffer that is not mappable");
+    return nullptr;
+  }
   // Initialize with a dummy shmem, it will become real after the promise is
   // resolved.
   SetMapped(ipc::Shmem(), aMode == dom::GPUMapMode_Binding::WRITE);
@@ -156,7 +161,8 @@ void Buffer::Unmap(JSContext* aCx, ErrorResult& aRv) {
     }
   };
-  mParent->UnmapBuffer(mId, std::move(mMapped->mShmem), mMapped->mWritable);
+  mParent->UnmapBuffer(mId, std::move(mMapped->mShmem), mMapped->mWritable,
+                       mMappable);
   mMapped.reset();
 }

View file

@@ -45,7 +45,7 @@ class Buffer final : public ObjectBase, public ChildOf<Device> {
   GPU_DECL_CYCLE_COLLECTION(Buffer)
   GPU_DECL_JS_WRAP(Buffer)
-  Buffer(Device* const aParent, RawId aId, BufferAddress aSize);
+  Buffer(Device* const aParent, RawId aId, BufferAddress aSize, bool aMappable);
   void SetMapped(ipc::Shmem&& aShmem, bool aWritable);
   const RawId mId;
@@ -58,6 +58,7 @@ class Buffer final : public ObjectBase, public ChildOf<Device> {
   // (which may be smaller than `BufferAddress`), but general not all buffers
   // are mapped.
   const BufferAddress mSize;
+  const bool mMappable;
   nsString mLabel;
   // Information about the currently active mapping.
   Maybe<MappedInfo> mMapped;

View file

@@ -98,7 +98,7 @@ already_AddRefed<Buffer> Device::CreateBuffer(
   // If the buffer is not mapped at creation, and it has Shmem, we send it
   // to the GPU process. Otherwise, we keep it.
   RawId id = mBridge->DeviceCreateBuffer(mId, aDesc);
-  RefPtr<Buffer> buffer = new Buffer(this, id, aDesc.mSize);
+  RefPtr<Buffer> buffer = new Buffer(this, id, aDesc.mSize, hasMapFlags);
   if (aDesc.mMappedAtCreation) {
     buffer->SetMapped(std::move(shmem),
                       !(aDesc.mUsage & dom::GPUBufferUsage_Binding::MAP_READ));
@@ -140,8 +140,9 @@ RefPtr<MappingPromise> Device::MapBufferAsync(RawId aId, uint32_t aMode,
   return mBridge->SendBufferMap(aId, mode, offset.value(), size.value());
 }
-void Device::UnmapBuffer(RawId aId, ipc::Shmem&& aShmem, bool aFlush) {
-  mBridge->SendBufferUnmap(aId, std::move(aShmem), aFlush);
+void Device::UnmapBuffer(RawId aId, ipc::Shmem&& aShmem, bool aFlush,
+                         bool aKeepShmem) {
+  mBridge->SendBufferUnmap(aId, std::move(aShmem), aFlush, aKeepShmem);
 }
 already_AddRefed<Texture> Device::CreateTexture(

View file

@@ -87,7 +87,8 @@ class Device final : public DOMEventTargetHelper {
   RefPtr<MappingPromise> MapBufferAsync(RawId aId, uint32_t aMode,
                                         size_t aOffset, size_t aSize,
                                         ErrorResult& aRv);
-  void UnmapBuffer(RawId aId, ipc::Shmem&& aShmem, bool aFlush);
+  void UnmapBuffer(RawId aId, ipc::Shmem&& aShmem, bool aFlush,
+                   bool aKeepShmem);
   already_AddRefed<Texture> InitSwapChain(
       const dom::GPUSwapChainDescriptor& aDesc,
       const dom::GPUExtent3DDict& aExtent3D,

View file

@@ -40,7 +40,7 @@ parent:
   async AdapterDestroy(RawId selfId);
   async BufferReturnShmem(RawId selfId, Shmem shmem);
   async BufferMap(RawId selfId, WGPUHostMap hostMap, uint64_t offset, uint64_t size) returns (Shmem sm);
-  async BufferUnmap(RawId selfId, Shmem shmem, bool flush);
+  async BufferUnmap(RawId selfId, Shmem shmem, bool flush, bool keepShmem);
   async BufferDestroy(RawId selfId);
   async TextureDestroy(RawId selfId);
   async TextureViewDestroy(RawId selfId);

View file

@@ -13,6 +13,8 @@ namespace webgpu {
 const uint64_t POLL_TIME_MS = 100;
+static mozilla::LazyLogModule sLogger("WebGPU");
 // A helper class to force error checks coming across FFI.
 // It will assert in destructor if unchecked.
 // TODO: refactor this to avoid stack-allocating the buffer all the time.
@@ -269,6 +271,8 @@ ipc::IPCResult WebGPUParent::RecvDeviceDestroy(RawId aSelfId) {
 ipc::IPCResult WebGPUParent::RecvBufferReturnShmem(RawId aSelfId,
                                                    Shmem&& aShmem) {
+  MOZ_LOG(sLogger, LogLevel::Info,
+          ("RecvBufferReturnShmem %" PRIu64 "\n", aSelfId));
   mSharedMemoryMap[aSelfId] = aShmem;
   return IPC_OK();
 }
@@ -310,9 +314,17 @@ ipc::IPCResult WebGPUParent::RecvBufferMap(RawId aSelfId,
                                            ffi::WGPUHostMap aHostMap,
                                            uint64_t aOffset, uint64_t aSize,
                                            BufferMapResolver&& aResolver) {
+  MOZ_LOG(sLogger, LogLevel::Info,
+          ("RecvBufferMap %" PRIu64 " offset=%" PRIu64 " size=%" PRIu64 "\n",
+           aSelfId, aOffset, aSize));
+  auto& shmem = mSharedMemoryMap[aSelfId];
+  if (!shmem.IsReadable()) {
+    MOZ_LOG(sLogger, LogLevel::Error, ("\tshmem is empty\n"));
+    return IPC_OK();
+  }
   auto* request = new MapRequest(mContext, aSelfId, aHostMap, aOffset,
-                                 std::move(mSharedMemoryMap[aSelfId]),
-                                 std::move(aResolver));
+                                 std::move(shmem), std::move(aResolver));
   ffi::WGPUBufferMapOperation mapOperation = {
       aHostMap, &MapCallback, reinterpret_cast<uint8_t*>(request)};
   ffi::wgpu_server_buffer_map(mContext, aSelfId, aOffset, aSize, mapOperation);
@@ -320,7 +332,7 @@ ipc::IPCResult WebGPUParent::RecvBufferMap(RawId aSelfId,
 }
 ipc::IPCResult WebGPUParent::RecvBufferUnmap(RawId aSelfId, Shmem&& aShmem,
-                                             bool aFlush) {
+                                             bool aFlush, bool aKeepShmem) {
   if (aFlush) {
     // TODO: flush exact modified sub-range
     uint8_t* ptr = ffi::wgpu_server_buffer_get_mapped_range(
@@ -331,17 +343,25 @@ ipc::IPCResult WebGPUParent::RecvBufferUnmap(RawId aSelfId, Shmem&& aShmem,
   ffi::wgpu_server_buffer_unmap(mContext, aSelfId);
+  MOZ_LOG(sLogger, LogLevel::Info,
+          ("RecvBufferUnmap %" PRIu64 " flush=%d\n", aSelfId, aFlush));
   const auto iter = mSharedMemoryMap.find(aSelfId);
-  if (iter == mSharedMemoryMap.end()) {
-    DeallocShmem(aShmem);
-  } else {
+  if (iter != mSharedMemoryMap.end()) {
     iter->second = aShmem;
+  } else if (aKeepShmem) {
+    mSharedMemoryMap[aSelfId] = aShmem;
+  } else {
+    // we are here if the buffer was mapped at creation, but doesn't have any
+    // mapping flags
+    DeallocShmem(aShmem);
   }
   return IPC_OK();
 }
 ipc::IPCResult WebGPUParent::RecvBufferDestroy(RawId aSelfId) {
   ffi::wgpu_server_buffer_drop(mContext, aSelfId);
+  MOZ_LOG(sLogger, LogLevel::Info,
+          ("RecvBufferDestroy %" PRIu64 "\n", aSelfId));
   const auto iter = mSharedMemoryMap.find(aSelfId);
   if (iter != mSharedMemoryMap.end()) {

View file

@@ -34,7 +34,8 @@ class WebGPUParent final : public PWebGPUParent {
   ipc::IPCResult RecvBufferMap(RawId aSelfId, ffi::WGPUHostMap aHostMap,
                                uint64_t aOffset, uint64_t size,
                                BufferMapResolver&& aResolver);
-  ipc::IPCResult RecvBufferUnmap(RawId aSelfId, Shmem&& aShmem, bool aFlush);
+  ipc::IPCResult RecvBufferUnmap(RawId aSelfId, Shmem&& aShmem, bool aFlush,
+                                 bool aKeepShmem);
   ipc::IPCResult RecvBufferDestroy(RawId aSelfId);
   ipc::IPCResult RecvTextureDestroy(RawId aSelfId);
   ipc::IPCResult RecvTextureViewDestroy(RawId aSelfId);