2019-08-16 04:30:02 +03:00
|
|
|
/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*-
|
|
|
|
* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
|
|
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
|
|
|
|
|
|
|
#import "mozilla/layers/NativeLayerCA.h"
|
|
|
|
|
|
|
|
#import <QuartzCore/QuartzCore.h>
|
|
|
|
#import <CoreVideo/CVPixelBuffer.h>
|
|
|
|
|
|
|
|
#include <utility>
|
|
|
|
#include <algorithm>
|
|
|
|
|
2019-09-02 01:35:56 +03:00
|
|
|
#include "GLContextCGL.h"
|
|
|
|
#include "MozFramebuffer.h"
|
|
|
|
#include "ScopedGLHelpers.h"
|
|
|
|
|
2019-08-20 01:54:26 +03:00
|
|
|
@interface CALayer (PrivateSetContentsOpaque)
|
|
|
|
- (void)setContentsOpaque:(BOOL)opaque;
|
|
|
|
@end
|
|
|
|
|
2019-08-16 04:30:02 +03:00
|
|
|
namespace mozilla {
|
|
|
|
namespace layers {
|
|
|
|
|
|
|
|
using gfx::IntPoint;
|
|
|
|
using gfx::IntSize;
|
|
|
|
using gfx::IntRect;
|
|
|
|
using gfx::IntRegion;
|
|
|
|
|
|
|
|
/* static */ already_AddRefed<NativeLayerRootCA> NativeLayerRootCA::CreateForCALayer(
|
|
|
|
CALayer* aLayer) {
|
|
|
|
RefPtr<NativeLayerRootCA> layerRoot = new NativeLayerRootCA(aLayer);
|
|
|
|
return layerRoot.forget();
|
|
|
|
}
|
|
|
|
|
|
|
|
NativeLayerRootCA::NativeLayerRootCA(CALayer* aLayer)
|
2019-08-16 04:52:55 +03:00
|
|
|
: mMutex("NativeLayerRootCA"), mRootCALayer([aLayer retain]) {}
|
2019-08-16 04:30:02 +03:00
|
|
|
|
|
|
|
NativeLayerRootCA::~NativeLayerRootCA() {
|
|
|
|
MOZ_RELEASE_ASSERT(mSublayers.IsEmpty(),
|
|
|
|
"Please clear all layers before destroying the layer root.");
|
|
|
|
|
|
|
|
// FIXME: mMutated might be true at this point, which would indicate that, even
|
|
|
|
// though mSublayers is empty now, this state may not yet have been synced to
|
|
|
|
// the underlying CALayer. In other words, mRootCALayer might still have sublayers.
|
|
|
|
// Should we do anything about that?
|
|
|
|
// We could just clear mRootCALayer's sublayers now, but doing so would be a
|
|
|
|
// layer tree transformation outside of a transaction, which we want to avoid.
|
|
|
|
// But we also don't want to trigger a transaction just for clearing the
|
|
|
|
// window's layers. And we wouldn't expect a NativeLayerRootCA to be destroyed
|
|
|
|
// while the window is still open and visible. Are layer tree modifications
|
|
|
|
// outside of CATransactions allowed while the window is closed? Who knows.
|
|
|
|
|
|
|
|
[mRootCALayer release];
|
|
|
|
}
|
|
|
|
|
|
|
|
already_AddRefed<NativeLayer> NativeLayerRootCA::CreateLayer() {
|
|
|
|
RefPtr<NativeLayer> layer = new NativeLayerCA();
|
|
|
|
return layer.forget();
|
|
|
|
}
|
|
|
|
|
|
|
|
void NativeLayerRootCA::AppendLayer(NativeLayer* aLayer) {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
|
|
|
RefPtr<NativeLayerCA> layerCA = aLayer->AsNativeLayerCA();
|
|
|
|
MOZ_RELEASE_ASSERT(layerCA);
|
|
|
|
|
|
|
|
mSublayers.AppendElement(layerCA);
|
|
|
|
layerCA->SetBackingScale(mBackingScale);
|
|
|
|
mMutated = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
void NativeLayerRootCA::RemoveLayer(NativeLayer* aLayer) {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
|
|
|
RefPtr<NativeLayerCA> layerCA = aLayer->AsNativeLayerCA();
|
|
|
|
MOZ_RELEASE_ASSERT(layerCA);
|
|
|
|
|
|
|
|
mSublayers.RemoveElement(layerCA);
|
|
|
|
mMutated = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Must be called within a current CATransaction on the transaction's thread.
|
|
|
|
void NativeLayerRootCA::ApplyChanges() {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
|
|
|
[CATransaction setDisableActions:YES];
|
|
|
|
|
|
|
|
// Call ApplyChanges on our sublayers first, and then update the root layer's
|
|
|
|
// list of sublayers. The order is important because we need layer->UnderlyingCALayer()
|
|
|
|
// to be non-null, and the underlying CALayer gets lazily initialized in ApplyChanges().
|
|
|
|
for (auto layer : mSublayers) {
|
|
|
|
layer->ApplyChanges();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mMutated) {
|
|
|
|
NSMutableArray<CALayer*>* sublayers = [NSMutableArray arrayWithCapacity:mSublayers.Length()];
|
|
|
|
for (auto layer : mSublayers) {
|
|
|
|
[sublayers addObject:layer->UnderlyingCALayer()];
|
|
|
|
}
|
|
|
|
mRootCALayer.sublayers = sublayers;
|
|
|
|
mMutated = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void NativeLayerRootCA::SetBackingScale(float aBackingScale) {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
|
|
|
mBackingScale = aBackingScale;
|
|
|
|
for (auto layer : mSublayers) {
|
|
|
|
layer->SetBackingScale(aBackingScale);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
NativeLayerCA::NativeLayerCA() : mMutex("NativeLayerCA") {}
|
|
|
|
|
|
|
|
NativeLayerCA::~NativeLayerCA() {
|
|
|
|
SetSurfaceRegistry(nullptr); // or maybe MOZ_RELEASE_ASSERT(!mSurfaceRegistry) would be better?
|
|
|
|
|
2019-09-02 02:22:04 +03:00
|
|
|
if (mInProgressLockedIOSurface) {
|
|
|
|
mInProgressLockedIOSurface->Unlock(false);
|
|
|
|
mInProgressLockedIOSurface = nullptr;
|
|
|
|
}
|
2019-08-16 04:30:02 +03:00
|
|
|
if (mInProgressSurface) {
|
|
|
|
IOSurfaceDecrementUseCount(mInProgressSurface->mSurface.get());
|
|
|
|
}
|
|
|
|
if (mReadySurface) {
|
|
|
|
IOSurfaceDecrementUseCount(mReadySurface->mSurface.get());
|
|
|
|
}
|
|
|
|
|
2019-08-20 01:54:26 +03:00
|
|
|
for (CALayer* contentLayer : mContentCALayers) {
|
|
|
|
[contentLayer release];
|
|
|
|
}
|
|
|
|
[mWrappingCALayer release];
|
2019-08-16 04:30:02 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void NativeLayerCA::SetSurfaceRegistry(RefPtr<IOSurfaceRegistry> aSurfaceRegistry) {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
|
|
|
if (mSurfaceRegistry) {
|
|
|
|
for (auto surf : mSurfaces) {
|
|
|
|
mSurfaceRegistry->UnregisterSurface(surf.mSurface);
|
|
|
|
}
|
|
|
|
if (mInProgressSurface) {
|
|
|
|
mSurfaceRegistry->UnregisterSurface(mInProgressSurface->mSurface);
|
|
|
|
}
|
|
|
|
if (mReadySurface) {
|
|
|
|
mSurfaceRegistry->UnregisterSurface(mReadySurface->mSurface);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
mSurfaceRegistry = aSurfaceRegistry;
|
|
|
|
if (mSurfaceRegistry) {
|
|
|
|
for (auto surf : mSurfaces) {
|
|
|
|
mSurfaceRegistry->RegisterSurface(surf.mSurface);
|
|
|
|
}
|
|
|
|
if (mInProgressSurface) {
|
|
|
|
mSurfaceRegistry->RegisterSurface(mInProgressSurface->mSurface);
|
|
|
|
}
|
|
|
|
if (mReadySurface) {
|
|
|
|
mSurfaceRegistry->RegisterSurface(mReadySurface->mSurface);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
RefPtr<IOSurfaceRegistry> NativeLayerCA::GetSurfaceRegistry() {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
|
|
|
return mSurfaceRegistry;
|
|
|
|
}
|
|
|
|
|
|
|
|
void NativeLayerCA::SetSurfaceIsFlipped(bool aIsFlipped) {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
2019-08-20 01:54:26 +03:00
|
|
|
if (aIsFlipped != mSurfaceIsFlipped) {
|
|
|
|
mSurfaceIsFlipped = aIsFlipped;
|
|
|
|
mMutatedGeometry = true;
|
|
|
|
}
|
2019-08-16 04:30:02 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
bool NativeLayerCA::SurfaceIsFlipped() {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
|
|
|
return mSurfaceIsFlipped;
|
|
|
|
}
|
|
|
|
|
|
|
|
void NativeLayerCA::SetRect(const IntRect& aRect) {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
2019-08-20 01:54:26 +03:00
|
|
|
if (aRect.TopLeft() != mPosition) {
|
|
|
|
mPosition = aRect.TopLeft();
|
|
|
|
mMutatedPosition = true;
|
|
|
|
}
|
|
|
|
if (aRect.Size() != mSize) {
|
|
|
|
mSize = aRect.Size();
|
|
|
|
mMutatedGeometry = true;
|
|
|
|
}
|
2019-08-16 04:30:02 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
IntRect NativeLayerCA::GetRect() {
|
|
|
|
MutexAutoLock lock(mMutex);
|
2019-08-20 01:54:26 +03:00
|
|
|
return IntRect(mPosition, mSize);
|
2019-08-16 04:30:02 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void NativeLayerCA::SetBackingScale(float aBackingScale) {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
2019-08-20 01:54:26 +03:00
|
|
|
if (aBackingScale != mBackingScale) {
|
|
|
|
mBackingScale = aBackingScale;
|
|
|
|
mMutatedGeometry = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void NativeLayerCA::SetOpaqueRegion(const gfx::IntRegion& aRegion) {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
|
|
|
if (aRegion != mOpaqueRegion) {
|
|
|
|
mOpaqueRegion = aRegion;
|
|
|
|
mMutatedGeometry = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
gfx::IntRegion NativeLayerCA::OpaqueRegion() {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
return mOpaqueRegion;
|
2019-08-16 04:30:02 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
IntRegion NativeLayerCA::CurrentSurfaceInvalidRegion() {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
|
|
|
MOZ_RELEASE_ASSERT(
|
|
|
|
mInProgressSurface,
|
|
|
|
"Only call currentSurfaceInvalidRegion after a call to NextSurface and before the call "
|
|
|
|
"to notifySurfaceIsReady.");
|
|
|
|
return mInProgressSurface->mInvalidRegion;
|
|
|
|
}
|
|
|
|
|
|
|
|
void NativeLayerCA::InvalidateRegionThroughoutSwapchain(const IntRegion& aRegion) {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
|
|
|
IntRegion r = aRegion;
|
2019-08-20 01:54:26 +03:00
|
|
|
r.AndWith(IntRect(IntPoint(0, 0), mSize));
|
2019-08-16 04:30:02 +03:00
|
|
|
if (mInProgressSurface) {
|
|
|
|
mInProgressSurface->mInvalidRegion.OrWith(r);
|
|
|
|
}
|
|
|
|
if (mReadySurface) {
|
|
|
|
mReadySurface->mInvalidRegion.OrWith(r);
|
|
|
|
}
|
|
|
|
for (auto& surf : mSurfaces) {
|
|
|
|
surf.mInvalidRegion.OrWith(r);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
CFTypeRefPtr<IOSurfaceRef> NativeLayerCA::NextSurface() {
|
|
|
|
MutexAutoLock lock(mMutex);
|
2019-09-02 02:22:04 +03:00
|
|
|
return NextSurfaceLocked(lock);
|
|
|
|
}
|
2019-08-16 04:30:02 +03:00
|
|
|
|
2019-09-02 02:22:04 +03:00
|
|
|
CFTypeRefPtr<IOSurfaceRef> NativeLayerCA::NextSurfaceLocked(const MutexAutoLock& aLock) {
|
2019-08-20 01:54:26 +03:00
|
|
|
IntSize surfaceSize = mSize;
|
2019-08-16 04:30:02 +03:00
|
|
|
if (surfaceSize.IsEmpty()) {
|
|
|
|
NSLog(@"NextSurface returning nullptr because of invalid surfaceSize (%d, %d).",
|
|
|
|
surfaceSize.width, surfaceSize.height);
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
MOZ_RELEASE_ASSERT(
|
|
|
|
!mInProgressSurface,
|
|
|
|
"ERROR: Do not call NextSurface twice in sequence. Call NotifySurfaceReady before the "
|
|
|
|
"next call to NextSurface.");
|
|
|
|
|
|
|
|
// Find the last surface in unusedSurfaces which has the right size. If such
|
|
|
|
// a surface exists, it is the surface we will recycle.
|
2019-09-02 02:22:04 +03:00
|
|
|
std::vector<SurfaceWithInvalidRegion> unusedSurfaces = RemoveExcessUnusedSurfaces(aLock);
|
2019-08-16 04:30:02 +03:00
|
|
|
auto surfIter = std::find_if(
|
|
|
|
unusedSurfaces.rbegin(), unusedSurfaces.rend(),
|
|
|
|
[surfaceSize](const SurfaceWithInvalidRegion& s) { return s.mSize == surfaceSize; });
|
|
|
|
|
|
|
|
Maybe<SurfaceWithInvalidRegion> surf;
|
|
|
|
if (surfIter != unusedSurfaces.rend()) {
|
|
|
|
// We found the surface we want to recycle.
|
|
|
|
surf = Some(*surfIter);
|
|
|
|
|
|
|
|
// Remove surf from unusedSurfaces.
|
|
|
|
// The reverse iterator makes this a bit cumbersome.
|
|
|
|
unusedSurfaces.erase(std::next(surfIter).base());
|
|
|
|
} else {
|
|
|
|
CFTypeRefPtr<IOSurfaceRef> newSurf = CFTypeRefPtr<IOSurfaceRef>::WrapUnderCreateRule(
|
|
|
|
IOSurfaceCreate((__bridge CFDictionaryRef) @{
|
|
|
|
(__bridge NSString*)kIOSurfaceWidth : @(surfaceSize.width),
|
|
|
|
(__bridge NSString*)kIOSurfaceHeight : @(surfaceSize.height),
|
|
|
|
(__bridge NSString*)kIOSurfacePixelFormat : @(kCVPixelFormatType_32BGRA),
|
|
|
|
(__bridge NSString*)kIOSurfaceBytesPerElement : @(4),
|
|
|
|
}));
|
|
|
|
if (!newSurf) {
|
|
|
|
NSLog(@"NextSurface returning nullptr because IOSurfaceCreate failed to create the surface.");
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
if (mSurfaceRegistry) {
|
|
|
|
mSurfaceRegistry->RegisterSurface(newSurf);
|
|
|
|
}
|
|
|
|
surf =
|
|
|
|
Some(SurfaceWithInvalidRegion{newSurf, IntRect(IntPoint(0, 0), surfaceSize), surfaceSize});
|
|
|
|
}
|
|
|
|
|
|
|
|
// Delete all other unused surfaces.
|
2019-09-03 03:33:31 +03:00
|
|
|
for (auto unusedSurf : unusedSurfaces) {
|
|
|
|
if (mSurfaceRegistry) {
|
2019-08-16 04:30:02 +03:00
|
|
|
mSurfaceRegistry->UnregisterSurface(unusedSurf.mSurface);
|
|
|
|
}
|
2019-09-03 03:33:31 +03:00
|
|
|
mFramebuffers.erase(unusedSurf.mSurface);
|
2019-08-16 04:30:02 +03:00
|
|
|
}
|
|
|
|
unusedSurfaces.clear();
|
|
|
|
|
|
|
|
MOZ_RELEASE_ASSERT(surf);
|
|
|
|
mInProgressSurface = std::move(surf);
|
|
|
|
IOSurfaceIncrementUseCount(mInProgressSurface->mSurface.get());
|
|
|
|
return mInProgressSurface->mSurface;
|
|
|
|
}
|
|
|
|
|
2019-09-02 02:22:04 +03:00
|
|
|
RefPtr<gfx::DrawTarget> NativeLayerCA::NextSurfaceAsDrawTarget(gfx::BackendType aBackendType) {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
CFTypeRefPtr<IOSurfaceRef> surface = NextSurfaceLocked(lock);
|
|
|
|
if (!surface) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
mInProgressLockedIOSurface = new MacIOSurface(std::move(surface));
|
|
|
|
mInProgressLockedIOSurface->Lock(false);
|
|
|
|
return mInProgressLockedIOSurface->GetAsDrawTargetLocked(aBackendType);
|
|
|
|
}
|
|
|
|
|
2019-09-02 01:35:56 +03:00
|
|
|
void NativeLayerCA::SetGLContext(gl::GLContext* aContext) {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
|
|
|
RefPtr<gl::GLContextCGL> glContextCGL = gl::GLContextCGL::Cast(aContext);
|
|
|
|
MOZ_RELEASE_ASSERT(glContextCGL, "Unexpected GLContext type");
|
|
|
|
|
|
|
|
if (glContextCGL != mGLContext) {
|
|
|
|
mFramebuffers.clear();
|
|
|
|
mGLContext = glContextCGL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
gl::GLContext* NativeLayerCA::GetGLContext() {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
return mGLContext;
|
|
|
|
}
|
|
|
|
|
|
|
|
Maybe<GLuint> NativeLayerCA::NextSurfaceAsFramebuffer(bool aNeedsDepth) {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
CFTypeRefPtr<IOSurfaceRef> surface = NextSurfaceLocked(lock);
|
|
|
|
if (!surface) {
|
|
|
|
return Nothing();
|
|
|
|
}
|
|
|
|
|
|
|
|
return Some(GetOrCreateFramebufferForSurface(lock, std::move(surface), aNeedsDepth));
|
|
|
|
}
|
|
|
|
|
|
|
|
GLuint NativeLayerCA::GetOrCreateFramebufferForSurface(const MutexAutoLock&,
|
|
|
|
CFTypeRefPtr<IOSurfaceRef> aSurface,
|
|
|
|
bool aNeedsDepth) {
|
|
|
|
auto fbCursor = mFramebuffers.find(aSurface);
|
|
|
|
if (fbCursor != mFramebuffers.end()) {
|
|
|
|
return fbCursor->second->mFB;
|
|
|
|
}
|
|
|
|
|
|
|
|
MOZ_RELEASE_ASSERT(
|
|
|
|
mGLContext, "Only call NextSurfaceAsFramebuffer when a GLContext is set on this NativeLayer");
|
|
|
|
mGLContext->MakeCurrent();
|
|
|
|
GLuint tex = mGLContext->CreateTexture();
|
|
|
|
{
|
|
|
|
const gl::ScopedBindTexture bindTex(mGLContext, tex, LOCAL_GL_TEXTURE_RECTANGLE_ARB);
|
|
|
|
CGLTexImageIOSurface2D(mGLContext->GetCGLContext(), LOCAL_GL_TEXTURE_RECTANGLE_ARB,
|
|
|
|
LOCAL_GL_RGBA, mSize.width, mSize.height, LOCAL_GL_BGRA,
|
|
|
|
LOCAL_GL_UNSIGNED_INT_8_8_8_8_REV, aSurface.get(), 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
auto fb = gl::MozFramebuffer::CreateWith(mGLContext, mSize, 0, aNeedsDepth,
|
|
|
|
LOCAL_GL_TEXTURE_RECTANGLE_ARB, tex);
|
|
|
|
GLuint fbo = fb->mFB;
|
|
|
|
mFramebuffers.insert({aSurface, std::move(fb)});
|
|
|
|
|
|
|
|
return fbo;
|
|
|
|
}
|
|
|
|
|
2019-08-16 04:30:02 +03:00
|
|
|
void NativeLayerCA::NotifySurfaceReady() {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
|
|
|
MOZ_RELEASE_ASSERT(mInProgressSurface,
|
|
|
|
"NotifySurfaceReady called without preceding call to NextSurface");
|
|
|
|
if (mReadySurface) {
|
|
|
|
IOSurfaceDecrementUseCount(mReadySurface->mSurface.get());
|
|
|
|
mSurfaces.push_back(*mReadySurface);
|
|
|
|
mReadySurface = Nothing();
|
|
|
|
}
|
2019-09-02 02:22:04 +03:00
|
|
|
|
|
|
|
if (mInProgressLockedIOSurface) {
|
|
|
|
mInProgressLockedIOSurface->Unlock(false);
|
|
|
|
mInProgressLockedIOSurface = nullptr;
|
|
|
|
}
|
|
|
|
|
2019-08-16 04:30:02 +03:00
|
|
|
mReadySurface = std::move(mInProgressSurface);
|
|
|
|
mReadySurface->mInvalidRegion = IntRect();
|
|
|
|
}
|
|
|
|
|
|
|
|
void NativeLayerCA::ApplyChanges() {
|
|
|
|
MutexAutoLock lock(mMutex);
|
|
|
|
|
2019-08-20 01:54:26 +03:00
|
|
|
if (!mWrappingCALayer) {
|
|
|
|
mWrappingCALayer = [[CALayer layer] retain];
|
|
|
|
mWrappingCALayer.position = NSZeroPoint;
|
|
|
|
mWrappingCALayer.bounds = NSZeroRect;
|
|
|
|
mWrappingCALayer.anchorPoint = NSZeroPoint;
|
|
|
|
mWrappingCALayer.contentsGravity = kCAGravityTopLeft;
|
2019-08-16 04:30:02 +03:00
|
|
|
}
|
|
|
|
|
2019-08-20 01:54:26 +03:00
|
|
|
if (mMutatedPosition || mMutatedGeometry) {
|
|
|
|
mWrappingCALayer.position =
|
|
|
|
CGPointMake(mPosition.x / mBackingScale, mPosition.y / mBackingScale);
|
|
|
|
mMutatedPosition = false;
|
2019-08-16 04:30:02 +03:00
|
|
|
}
|
|
|
|
|
2019-08-20 01:54:26 +03:00
|
|
|
if (mMutatedGeometry) {
|
|
|
|
mWrappingCALayer.bounds =
|
|
|
|
CGRectMake(0, 0, mSize.width / mBackingScale, mSize.height / mBackingScale);
|
|
|
|
|
|
|
|
// Assemble opaque and transparent sublayers to cover the respective regions.
|
|
|
|
// mContentCALayers has the current sublayers. We will try to re-use layers
|
|
|
|
// as much as possible.
|
|
|
|
IntRegion opaqueRegion;
|
|
|
|
opaqueRegion.And(IntRect(IntPoint(), mSize), mOpaqueRegion);
|
|
|
|
IntRegion transparentRegion;
|
|
|
|
transparentRegion.Sub(IntRect(IntPoint(), mSize), opaqueRegion);
|
|
|
|
std::deque<CALayer*> layersToRecycle = std::move(mContentCALayers);
|
|
|
|
PlaceContentLayers(lock, opaqueRegion, true, &layersToRecycle);
|
|
|
|
PlaceContentLayers(lock, transparentRegion, false, &layersToRecycle);
|
|
|
|
for (CALayer* unusedLayer : layersToRecycle) {
|
|
|
|
[unusedLayer release];
|
|
|
|
}
|
|
|
|
NSMutableArray<CALayer*>* sublayers =
|
|
|
|
[NSMutableArray arrayWithCapacity:mContentCALayers.size()];
|
|
|
|
for (auto layer : mContentCALayers) {
|
|
|
|
[sublayers addObject:layer];
|
|
|
|
}
|
|
|
|
mWrappingCALayer.sublayers = sublayers;
|
|
|
|
mMutatedGeometry = false;
|
|
|
|
}
|
2019-08-16 04:30:02 +03:00
|
|
|
|
|
|
|
if (mReadySurface) {
|
2019-08-20 01:54:26 +03:00
|
|
|
for (CALayer* layer : mContentCALayers) {
|
|
|
|
layer.contents = (id)mReadySurface->mSurface.get();
|
|
|
|
}
|
2019-08-16 04:30:02 +03:00
|
|
|
IOSurfaceDecrementUseCount(mReadySurface->mSurface.get());
|
|
|
|
mSurfaces.push_back(*mReadySurface);
|
|
|
|
mReadySurface = Nothing();
|
|
|
|
}
|
2019-08-20 01:54:26 +03:00
|
|
|
}
|
2019-08-16 04:30:02 +03:00
|
|
|
|
2019-08-20 01:54:26 +03:00
|
|
|
void NativeLayerCA::PlaceContentLayers(const MutexAutoLock&, const IntRegion& aRegion, bool aOpaque,
|
|
|
|
std::deque<CALayer*>* aLayersToRecycle) {
|
|
|
|
for (auto iter = aRegion.RectIter(); !iter.Done(); iter.Next()) {
|
|
|
|
IntRect r = iter.Get();
|
2019-08-16 04:30:02 +03:00
|
|
|
|
2019-08-20 01:54:26 +03:00
|
|
|
CALayer* layer;
|
|
|
|
if (aLayersToRecycle->empty()) {
|
|
|
|
layer = [[CALayer layer] retain];
|
|
|
|
layer.anchorPoint = NSZeroPoint;
|
|
|
|
layer.contentsGravity = kCAGravityTopLeft;
|
|
|
|
} else {
|
|
|
|
layer = aLayersToRecycle->front();
|
|
|
|
aLayersToRecycle->pop_front();
|
|
|
|
}
|
|
|
|
layer.position = CGPointMake(r.x / mBackingScale, r.y / mBackingScale);
|
|
|
|
layer.bounds = CGRectMake(0, 0, r.width / mBackingScale, r.height / mBackingScale);
|
|
|
|
layer.contentsScale = mBackingScale;
|
|
|
|
CGRect unitContentsRect =
|
|
|
|
CGRectMake(CGFloat(r.x) / mSize.width, CGFloat(r.y) / mSize.height,
|
|
|
|
CGFloat(r.width) / mSize.width, CGFloat(r.height) / mSize.height);
|
|
|
|
if (mSurfaceIsFlipped) {
|
|
|
|
CGFloat height = r.height / mBackingScale;
|
|
|
|
layer.affineTransform = CGAffineTransformMake(1.0, 0.0, 0.0, -1.0, 0.0, height);
|
|
|
|
unitContentsRect.origin.y = 1.0 - (unitContentsRect.origin.y + unitContentsRect.size.height);
|
|
|
|
layer.contentsRect = unitContentsRect;
|
|
|
|
} else {
|
|
|
|
layer.affineTransform = CGAffineTransformIdentity;
|
|
|
|
layer.contentsRect = unitContentsRect;
|
|
|
|
}
|
|
|
|
layer.opaque = aOpaque;
|
|
|
|
if ([layer respondsToSelector:@selector(setContentsOpaque:)]) {
|
|
|
|
// The opaque property seems to not be enough when using IOSurface contents.
|
|
|
|
// Additionally, call the private method setContentsOpaque.
|
|
|
|
[layer setContentsOpaque:aOpaque];
|
|
|
|
}
|
|
|
|
mContentCALayers.push_back(layer);
|
|
|
|
}
|
2019-08-16 04:30:02 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Called when mMutex is already being held by the current thread.
|
|
|
|
std::vector<NativeLayerCA::SurfaceWithInvalidRegion> NativeLayerCA::RemoveExcessUnusedSurfaces(
|
|
|
|
const MutexAutoLock&) {
|
|
|
|
std::vector<SurfaceWithInvalidRegion> usedSurfaces;
|
|
|
|
std::vector<SurfaceWithInvalidRegion> unusedSurfaces;
|
|
|
|
|
|
|
|
// Separate mSurfaces into used and unused surfaces, leaving 2 surfaces behind.
|
|
|
|
while (mSurfaces.size() > 2) {
|
|
|
|
auto surf = std::move(mSurfaces.front());
|
|
|
|
mSurfaces.pop_front();
|
|
|
|
if (IOSurfaceIsInUse(surf.mSurface.get())) {
|
|
|
|
usedSurfaces.push_back(std::move(surf));
|
|
|
|
} else {
|
|
|
|
unusedSurfaces.push_back(std::move(surf));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Put the used surfaces back into mSurfaces, at the beginning.
|
|
|
|
mSurfaces.insert(mSurfaces.begin(), usedSurfaces.begin(), usedSurfaces.end());
|
|
|
|
|
|
|
|
return unusedSurfaces;
|
|
|
|
}
|
|
|
|
|
|
|
|
} // namespace layers
|
|
|
|
} // namespace mozilla
|