/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

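// IpcResourceUpdateQueue batches WebRender resource updates (images, blob
// images, fonts and font instances) on the WebRenderBridgeChild side and
// carries their byte payloads in shared memory: small payloads are packed
// into fixed-size shmem chunks, large ones each get a dedicated Shmem.
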
#include "IpcResourceUpdateQueue.h"
|
|
|
|
#include <string.h>
|
|
|
|
#include <algorithm>
|
|
|
|
#include "mozilla/Maybe.h"
|
|
|
|
#include "mozilla/ipc/SharedMemory.h"
|
2018-07-06 15:25:33 +03:00
|
|
|
#include "mozilla/layers/PTextureChild.h"
|
2018-01-12 17:11:28 +03:00
|
|
|
#include "mozilla/layers/WebRenderBridgeChild.h"
|
2017-09-14 19:48:55 +03:00
|
|
|
|
|
|
|
namespace mozilla {
|
|
|
|
namespace wr {
|
|
|
|
|
2018-01-12 17:11:28 +03:00
|
|
|
using namespace mozilla::layers;
ShmSegmentsWriter::ShmSegmentsWriter(layers::WebRenderBridgeChild* aAllocator,
                                     size_t aChunkSize)
    : mShmAllocator(aAllocator), mCursor(0), mChunkSize(aChunkSize) {
  MOZ_ASSERT(mShmAllocator);
}

ShmSegmentsWriter::~ShmSegmentsWriter() { Clear(); }

ShmSegmentsWriter::ShmSegmentsWriter(ShmSegmentsWriter&& aOther) noexcept
    : mSmallAllocs(std::move(aOther.mSmallAllocs)),
      mLargeAllocs(std::move(aOther.mLargeAllocs)),
      mShmAllocator(aOther.mShmAllocator),
      mCursor(aOther.mCursor),
      mChunkSize(aOther.mChunkSize) {
  aOther.mCursor = 0;
}

ShmSegmentsWriter& ShmSegmentsWriter::operator=(
    ShmSegmentsWriter&& aOther) noexcept {
  MOZ_ASSERT(IsEmpty(), "Will forget existing updates!");
  Clear();
  mSmallAllocs = std::move(aOther.mSmallAllocs);
  mLargeAllocs = std::move(aOther.mLargeAllocs);
  mShmAllocator = aOther.mShmAllocator;
  mCursor = aOther.mCursor;
  mChunkSize = aOther.mChunkSize;
  aOther.mCursor = 0;
  return *this;
}

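// Copies aBytes into shared memory and returns an OffsetRange describing
// where the data landed. Payloads of at least four chunk sizes go into a
// dedicated Shmem (source() > 0, see AllocLargeChunk); smaller payloads are
// packed back to back into the fixed-size small chunks (source() == 0, with
// start() being the byte offset into the concatenated chunk stream). A zero
// length() in the returned range signals an allocation failure.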
layers::OffsetRange ShmSegmentsWriter::Write(Range<uint8_t> aBytes) {
  const size_t start = mCursor;
  const size_t length = aBytes.length();

  if (length >= mChunkSize * 4) {
    auto range = AllocLargeChunk(length);
    if (range.length()) {
      // Allocation was successful
      uint8_t* dstPtr = mLargeAllocs.LastElement().get<uint8_t>();
      memcpy(dstPtr, aBytes.begin().get(), length);
    }
    return range;
  }

  int remainingBytesToCopy = length;

  size_t srcCursor = 0;
  size_t dstCursor = mCursor;
  size_t currAllocLen = mSmallAllocs.Length();

  while (remainingBytesToCopy > 0) {
    if (dstCursor >= mSmallAllocs.Length() * mChunkSize) {
      if (!AllocChunk()) {
        // Allocation failed, so roll back to the state at the start of this
        // Write() call and abort.
        for (size_t i = mSmallAllocs.Length(); currAllocLen < i; i--) {
          MOZ_ASSERT(i > 0);
          RefCountedShmem& shm = mSmallAllocs.ElementAt(i - 1);
          RefCountedShm::Dealloc(mShmAllocator, shm);
          mSmallAllocs.RemoveElementAt(i - 1);
        }
        MOZ_ASSERT(mSmallAllocs.Length() == currAllocLen);
        return layers::OffsetRange(0, start, 0);
      }
      // Allocation succeeded, so dstCursor should now be pointing to
      // something inside the allocation buffer
      MOZ_ASSERT(dstCursor < (mSmallAllocs.Length() * mChunkSize));
    }

    const size_t dstMaxOffset = mChunkSize * mSmallAllocs.Length();
    const size_t dstBaseOffset = mChunkSize * (mSmallAllocs.Length() - 1);

    MOZ_ASSERT(dstCursor >= dstBaseOffset);
    MOZ_ASSERT(dstCursor <= dstMaxOffset);

    size_t availableRange = dstMaxOffset - dstCursor;
    size_t copyRange = std::min<int>(availableRange, remainingBytesToCopy);

    uint8_t* srcPtr = &aBytes[srcCursor];
    uint8_t* dstPtr = RefCountedShm::GetBytes(mSmallAllocs.LastElement()) +
                      (dstCursor - dstBaseOffset);

    memcpy(dstPtr, srcPtr, copyRange);

    srcCursor += copyRange;
    dstCursor += copyRange;
    remainingBytesToCopy -= copyRange;

    // sanity check
    MOZ_ASSERT(remainingBytesToCopy >= 0);
  }

  mCursor += length;

  return layers::OffsetRange(0, start, length);
}

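// Appends one more fixed-size chunk to mSmallAllocs. Returns false (and
// asserts in debug builds) if the shmem allocation fails.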
bool ShmSegmentsWriter::AllocChunk() {
  RefCountedShmem shm;
  if (!mShmAllocator->AllocResourceShmem(mChunkSize, shm)) {
    gfxCriticalNote << "ShmSegmentsWriter failed to allocate chunk #"
                    << mSmallAllocs.Length();
    MOZ_ASSERT(false, "ShmSegmentsWriter fails to allocate chunk");
    return false;
  }
  RefCountedShm::AddRef(shm);
  mSmallAllocs.AppendElement(shm);
  return true;
}

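// Allocates a dedicated Shmem for a single large payload. The returned
// OffsetRange uses a 1-based source() index into mLargeAllocs; source() == 0
// is reserved for the packed small-chunk stream.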
layers::OffsetRange ShmSegmentsWriter::AllocLargeChunk(size_t aSize) {
  ipc::Shmem shm;
  auto shmType = ipc::SharedMemory::SharedMemoryType::TYPE_BASIC;
  if (!mShmAllocator->AllocShmem(aSize, shmType, &shm)) {
    gfxCriticalNote
        << "ShmSegmentsWriter failed to allocate large chunk of size " << aSize;
    MOZ_ASSERT(false, "ShmSegmentsWriter fails to allocate large chunk");
    return layers::OffsetRange(0, 0, 0);
  }
  mLargeAllocs.AppendElement(shm);

  return layers::OffsetRange(mLargeAllocs.Length(), 0, aSize);
}

void ShmSegmentsWriter::Flush(nsTArray<RefCountedShmem>& aSmallAllocs,
                              nsTArray<ipc::Shmem>& aLargeAllocs) {
  MOZ_ASSERT(aSmallAllocs.IsEmpty());
  MOZ_ASSERT(aLargeAllocs.IsEmpty());
  mSmallAllocs.SwapElements(aSmallAllocs);
  mLargeAllocs.SwapElements(aLargeAllocs);
  mCursor = 0;
}

bool ShmSegmentsWriter::IsEmpty() const { return mCursor == 0; }

void ShmSegmentsWriter::Clear() {
  if (mShmAllocator) {
    IpcResourceUpdateQueue::ReleaseShmems(mShmAllocator, mSmallAllocs);
    IpcResourceUpdateQueue::ReleaseShmems(mShmAllocator, mLargeAllocs);
  }
  mCursor = 0;
}

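// ShmSegmentsReader wraps the shmems produced by a ShmSegmentsWriter once
// they have been received over IPC. The constructor validates every segment
// up front; on any inconsistency it leaves mChunkSize at zero, which makes
// subsequent Read() calls fail.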
ShmSegmentsReader::ShmSegmentsReader(
    const nsTArray<RefCountedShmem>& aSmallShmems,
    const nsTArray<ipc::Shmem>& aLargeShmems)
    : mSmallAllocs(aSmallShmems), mLargeAllocs(aLargeShmems), mChunkSize(0) {
  if (mSmallAllocs.IsEmpty()) {
    return;
  }

  mChunkSize = RefCountedShm::GetSize(mSmallAllocs[0]);

  // Check that all shmems are readable and have the same size. If anything
  // isn't right, set mChunkSize to zero, which signifies that the reader is
  // in an invalid state and Read calls will return false.
  for (const auto& shm : mSmallAllocs) {
    if (!RefCountedShm::IsValid(shm) ||
        RefCountedShm::GetSize(shm) != mChunkSize ||
        RefCountedShm::GetBytes(shm) == nullptr) {
      mChunkSize = 0;
      return;
    }
  }

  for (const auto& shm : mLargeAllocs) {
    if (!shm.IsReadable() || shm.get<uint8_t>() == nullptr) {
      mChunkSize = 0;
      return;
    }
  }
}

bool ShmSegmentsReader::ReadLarge(const layers::OffsetRange& aRange,
                                  wr::Vec<uint8_t>& aInto) {
  // source = zero is for small allocs.
  MOZ_RELEASE_ASSERT(aRange.source() != 0);
  if (aRange.source() > mLargeAllocs.Length()) {
    return false;
  }
  size_t id = aRange.source() - 1;
  const ipc::Shmem& shm = mLargeAllocs[id];
  if (shm.Size<uint8_t>() < aRange.length()) {
    return false;
  }

  uint8_t* srcPtr = shm.get<uint8_t>();
  aInto.PushBytes(Range<uint8_t>(srcPtr, aRange.length()));

  return true;
}

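// Appends the bytes described by aRange to aInto. Ranges with a non-zero
// source() are serviced from the large allocations via ReadLarge(); ranges in
// the small-chunk stream may span several chunks, so they are copied chunk by
// chunk. Returns false if the reader is invalid or the range does not fit in
// the received shmems.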
bool ShmSegmentsReader::Read(const layers::OffsetRange& aRange,
                             wr::Vec<uint8_t>& aInto) {
  if (aRange.length() == 0) {
    return true;
  }

  if (aRange.source() != 0) {
    return ReadLarge(aRange, aInto);
  }

  if (mChunkSize == 0) {
    return false;
  }

  if (aRange.start() + aRange.length() > mChunkSize * mSmallAllocs.Length()) {
    return false;
  }

  size_t initialLength = aInto.Length();

  size_t srcCursor = aRange.start();
  int remainingBytesToCopy = aRange.length();
  while (remainingBytesToCopy > 0) {
    const size_t shm_idx = srcCursor / mChunkSize;
    const size_t ptrOffset = srcCursor % mChunkSize;
    const size_t copyRange =
        std::min<int>(remainingBytesToCopy, mChunkSize - ptrOffset);
    uint8_t* srcPtr =
        RefCountedShm::GetBytes(mSmallAllocs[shm_idx]) + ptrOffset;

    aInto.PushBytes(Range<uint8_t>(srcPtr, copyRange));

    srcCursor += copyRange;
    remainingBytesToCopy -= copyRange;
  }

  return aInto.Length() - initialLength == aRange.length();
}

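// IpcResourceUpdateQueue pairs a ShmSegmentsWriter with a list of
// OpUpdateResource commands: the writer captures the byte payloads while
// mUpdates records the operations that reference them. The move operations
// and Clear() below also carry along the per-RenderRoot sub-queues held in
// mSubQueues.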
IpcResourceUpdateQueue::IpcResourceUpdateQueue(
    layers::WebRenderBridgeChild* aAllocator, wr::RenderRoot aRenderRoot,
    size_t aChunkSize)
    : mWriter(aAllocator, aChunkSize), mRenderRoot(aRenderRoot) {}

IpcResourceUpdateQueue::IpcResourceUpdateQueue(
    IpcResourceUpdateQueue&& aOther) noexcept
    : mWriter(std::move(aOther.mWriter)),
      mUpdates(std::move(aOther.mUpdates)),
      mRenderRoot(aOther.mRenderRoot) {
  for (auto renderRoot : wr::kNonDefaultRenderRoots) {
    mSubQueues[renderRoot] = std::move(aOther.mSubQueues[renderRoot]);
  }
}

IpcResourceUpdateQueue& IpcResourceUpdateQueue::operator=(
    IpcResourceUpdateQueue&& aOther) noexcept {
  MOZ_ASSERT(IsEmpty(), "Will forget existing updates!");
  mWriter = std::move(aOther.mWriter);
  mUpdates = std::move(aOther.mUpdates);
  mRenderRoot = aOther.mRenderRoot;
  for (auto renderRoot : wr::kNonDefaultRenderRoots) {
    mSubQueues[renderRoot] = std::move(aOther.mSubQueues[renderRoot]);
  }
  return *this;
}

void IpcResourceUpdateQueue::ReplaceResources(IpcResourceUpdateQueue&& aOther) {
  MOZ_ASSERT(IsEmpty(), "Will forget existing updates!");
  MOZ_ASSERT(!aOther.HasAnySubQueue(), "Subqueues will be lost!");
  MOZ_ASSERT(mRenderRoot == aOther.mRenderRoot);
  mWriter = std::move(aOther.mWriter);
  mUpdates = std::move(aOther.mUpdates);
  mRenderRoot = aOther.mRenderRoot;
}

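// The Add*/Update* helpers below that carry a byte payload serialize it
// through mWriter and then record the matching update op. They return false
// when the shmem allocation for the payload fails, in which case no op is
// appended.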
bool IpcResourceUpdateQueue::AddImage(ImageKey key,
                                      const ImageDescriptor& aDescriptor,
                                      Range<uint8_t> aBytes) {
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpAddImage(aDescriptor, bytes, 0, key));
  return true;
}

bool IpcResourceUpdateQueue::AddBlobImage(BlobImageKey key,
                                          const ImageDescriptor& aDescriptor,
                                          Range<uint8_t> aBytes,
                                          ImageIntRect aVisibleRect) {
  MOZ_RELEASE_ASSERT(aDescriptor.width > 0 && aDescriptor.height > 0);
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(
      layers::OpAddBlobImage(aDescriptor, bytes, aVisibleRect, 0, key));
  return true;
}

void IpcResourceUpdateQueue::AddExternalImage(wr::ExternalImageId aExtId,
                                              wr::ImageKey aKey) {
  mUpdates.AppendElement(layers::OpAddExternalImage(aExtId, aKey));
}

void IpcResourceUpdateQueue::PushExternalImageForTexture(
    wr::ExternalImageId aExtId, wr::ImageKey aKey,
    layers::TextureClient* aTexture, bool aIsUpdate) {
  MOZ_ASSERT(aTexture);
  MOZ_ASSERT(aTexture->GetIPDLActor());
  MOZ_RELEASE_ASSERT(aTexture->GetIPDLActor()->GetIPCChannel() ==
                     mWriter.WrBridge()->GetIPCChannel());
  mUpdates.AppendElement(layers::OpPushExternalImageForTexture(
      aExtId, aKey, nullptr, aTexture->GetIPDLActor(), aIsUpdate));
}

bool IpcResourceUpdateQueue::UpdateImageBuffer(
    ImageKey aKey, const ImageDescriptor& aDescriptor, Range<uint8_t> aBytes) {
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpUpdateImage(aDescriptor, bytes, aKey));
  return true;
}

bool IpcResourceUpdateQueue::UpdateBlobImage(BlobImageKey aKey,
                                             const ImageDescriptor& aDescriptor,
                                             Range<uint8_t> aBytes,
                                             ImageIntRect aVisibleRect,
                                             ImageIntRect aDirtyRect) {
  MOZ_ASSERT(aVisibleRect.width > 0 && aVisibleRect.height > 0);

  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpUpdateBlobImage(aDescriptor, bytes, aKey,
                                                   aVisibleRect, aDirtyRect));
  return true;
}

void IpcResourceUpdateQueue::UpdateExternalImage(wr::ExternalImageId aExtId,
                                                 wr::ImageKey aKey,
                                                 ImageIntRect aDirtyRect) {
  mUpdates.AppendElement(
      layers::OpUpdateExternalImage(aExtId, aKey, aDirtyRect));
}

void IpcResourceUpdateQueue::SetBlobImageVisibleArea(
    wr::BlobImageKey aKey, const ImageIntRect& aArea) {
  mUpdates.AppendElement(layers::OpSetBlobImageVisibleArea(aArea, aKey));
}

void IpcResourceUpdateQueue::DeleteImage(ImageKey aKey) {
  mUpdates.AppendElement(layers::OpDeleteImage(aKey));
}

void IpcResourceUpdateQueue::DeleteBlobImage(BlobImageKey aKey) {
  mUpdates.AppendElement(layers::OpDeleteBlobImage(aKey));
}

bool IpcResourceUpdateQueue::AddRawFont(wr::FontKey aKey, Range<uint8_t> aBytes,
                                        uint32_t aIndex) {
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpAddRawFont(bytes, aIndex, aKey));
  return true;
}

bool IpcResourceUpdateQueue::AddFontDescriptor(wr::FontKey aKey,
                                               Range<uint8_t> aBytes,
                                               uint32_t aIndex) {
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpAddFontDescriptor(bytes, aIndex, aKey));
  return true;
}

void IpcResourceUpdateQueue::DeleteFont(wr::FontKey aKey) {
  mUpdates.AppendElement(layers::OpDeleteFont(aKey));
}

void IpcResourceUpdateQueue::AddFontInstance(
    wr::FontInstanceKey aKey, wr::FontKey aFontKey, float aGlyphSize,
    const wr::FontInstanceOptions* aOptions,
    const wr::FontInstancePlatformOptions* aPlatformOptions,
    Range<const gfx::FontVariation> aVariations) {
  auto bytes = mWriter.WriteAsBytes(aVariations);
  mUpdates.AppendElement(layers::OpAddFontInstance(
      aOptions ? Some(*aOptions) : Nothing(),
      aPlatformOptions ? Some(*aPlatformOptions) : Nothing(), bytes, aKey,
      aFontKey, aGlyphSize));
}

void IpcResourceUpdateQueue::DeleteFontInstance(wr::FontInstanceKey aKey) {
  mUpdates.AppendElement(layers::OpDeleteFontInstance(aKey));
}

void IpcResourceUpdateQueue::Flush(
    nsTArray<layers::OpUpdateResource>& aUpdates,
    nsTArray<layers::RefCountedShmem>& aSmallAllocs,
    nsTArray<ipc::Shmem>& aLargeAllocs) {
  aUpdates.Clear();
  mUpdates.SwapElements(aUpdates);
  mWriter.Flush(aSmallAllocs, aLargeAllocs);
}

bool IpcResourceUpdateQueue::IsEmpty() const {
  if (mUpdates.Length() == 0) {
    MOZ_ASSERT(mWriter.IsEmpty());
    return true;
  }
  return false;
}

void IpcResourceUpdateQueue::Clear() {
  mWriter.Clear();
  mUpdates.Clear();

  for (auto& subQueue : mSubQueues) {
    if (subQueue) {
      subQueue->Clear();
    }
  }
}

// static
void IpcResourceUpdateQueue::ReleaseShmems(
    ipc::IProtocol* aShmAllocator, nsTArray<layers::RefCountedShmem>& aShms) {
  for (auto& shm : aShms) {
    if (RefCountedShm::IsValid(shm) && RefCountedShm::Release(shm) == 0) {
      RefCountedShm::Dealloc(aShmAllocator, shm);
    }
  }
  aShms.Clear();
}

// static
void IpcResourceUpdateQueue::ReleaseShmems(ipc::IProtocol* aShmAllocator,
                                           nsTArray<ipc::Shmem>& aShms) {
  for (auto& shm : aShms) {
    aShmAllocator->DeallocShmem(shm);
  }
  aShms.Clear();
}

}  // namespace wr
}  // namespace mozilla