Mirror of https://github.com/mozilla/gecko-dev.git
Bug 538323. Part 2: use ImageLayers to render video. r=kinetik
This commit is contained in:
Parent: 6b6667800c
Commit: 2abce146a9
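In outline, the new rendering path works as sketched below. This is a condensed, illustrative summary only: every type and call named here appears in the hunks that follow, while the helper name PublishFrame, the trimmed error handling, and the omitted locking are not part of the patch.

#include "ImageLayers.h"   // mozilla::layers::ImageContainer, Image

using namespace mozilla::layers;

// 1. nsHTMLMediaElement creates an ImageContainer from the LayerManager of the
//    widget that will eventually composite it (see GetImageContainer below).
// 2. The decoder wraps each decoded frame in an Image and publishes it through
//    nsMediaDecoder::SetVideoData, possibly off the main thread.
// 3. At paint time nsVideoFrame::BuildLayer wraps the container in an
//    ImageLayer, replacing the old RGB-buffer blit in nsMediaDecoder::Paint.
static void PublishFrame(ImageContainer* aContainer, Image* aImage)
{
  if (aContainer && aImage) {
    aContainer->SetCurrentImage(aImage); // what SetVideoData now boils down to
  }
}
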
@@ -44,6 +44,7 @@
#include "nsCycleCollectionParticipant.h"
#include "nsILoadGroup.h"
#include "nsIObserver.h"
#include "ImageLayers.h"

// Define to output information on decoding and painting framerate
/* #define DEBUG_FRAME_RATE 1 */

@@ -54,6 +55,8 @@ typedef PRUint16 nsMediaReadyState;
class nsHTMLMediaElement : public nsGenericHTMLElement,
                           public nsIObserver
{
  typedef mozilla::layers::ImageContainer ImageContainer;

public:
  nsHTMLMediaElement(nsINodeInfo *aNodeInfo, PRBool aFromParser = PR_FALSE);
  virtual ~nsHTMLMediaElement();

@@ -159,11 +162,13 @@ public:
  // (no data has arrived for a while).
  void DownloadStalled();

  // Draw the latest video data. See nsMediaDecoder for
  // details.
  void Paint(gfxContext* aContext,
             gfxPattern::GraphicsFilter aFilter,
             const gfxRect& aRect);
  // Called by the media decoder and the video frame to get the
  // ImageContainer containing the video data.
  ImageContainer* GetImageContainer();

  // Called by the video frame to get the print surface, if this is
  // a static document and we're not actually playing video
  gfxASurface* GetPrintSurface() { return mPrintSurface; }

  // Dispatch events
  nsresult DispatchSimpleEvent(const nsAString& aName);

@@ -393,6 +398,10 @@ protected:

  nsRefPtr<nsMediaDecoder> mDecoder;

  // A reference to the ImageContainer which contains the current frame
  // of video to display.
  nsRefPtr<ImageContainer> mImageContainer;

  // Holds a reference to the first channel we open to the media resource.
  // Once the decoder is created, control over the channel passes to the
  // decoder, and we null out this reference. We must store this in case

@@ -57,6 +57,7 @@
#include "prlock.h"
#include "nsThreadUtils.h"
#include "nsContentUtils.h"
#include "nsFrameManager.h"

#include "nsIScriptSecurityManager.h"
#include "nsIXPConnect.h"

@@ -73,6 +74,7 @@
#include "nsCommaSeparatedTokenizer.h"
#include "nsMediaStream.h"

#include "nsIDOMHTMLVideoElement.h"
#include "nsIContentPolicy.h"
#include "nsContentPolicyUtils.h"
#include "nsContentErrors.h"

@@ -80,6 +82,7 @@
#include "nsCycleCollectionParticipant.h"
#include "nsLayoutUtils.h"
#include "nsVideoFrame.h"
#include "BasicLayers.h"

#ifdef MOZ_OGG
#include "nsOggDecoder.h"

@@ -98,6 +101,8 @@ static PRLogModuleInfo* gMediaElementEventsLog;
#define LOG_EVENT(type, msg)
#endif

using namespace mozilla::layers;

// Under certain conditions there may be no-one holding references to
// a media element from script, DOM parent, etc, but the element may still
// fire meaningful events in the future so we can't destroy it yet:

@@ -1737,26 +1742,62 @@ void nsHTMLMediaElement::NotifyAutoplayDataReady()
  }
}

void nsHTMLMediaElement::Paint(gfxContext* aContext,
                               gfxPattern::GraphicsFilter aFilter,
                               const gfxRect& aRect)
/**
 * Returns a layer manager to use for the given document. Basically we
 * look up the document hierarchy for the first document which has
 * a presentation with an associated widget, and use that widget's
 * layer manager.
 */
static already_AddRefed<LayerManager> GetLayerManagerForDoc(nsIDocument* aDoc)
{
  if (mPrintSurface) {
    nsRefPtr<gfxPattern> pat = new gfxPattern(mPrintSurface);
    if (!pat)
      return;
    // Make the source image fill the rectangle completely
    pat->SetMatrix(gfxMatrix().Scale(mMediaSize.width/aRect.Width(),
                                     mMediaSize.height/aRect.Height()));
  while (aDoc) {
    nsIDocument* displayDoc = aDoc->GetDisplayDocument();
    if (displayDoc) {
      aDoc = displayDoc;
      continue;
    }

    pat->SetFilter(aFilter);

    aContext->NewPath();
    aContext->PixelSnappedRectangleAndSetPattern(aRect, pat);
    aContext->Fill();
  } else if (mDecoder) {
    mDecoder->Paint(aContext, aFilter, aRect);
    nsIPresShell* shell = aDoc->GetPrimaryShell();
    if (shell) {
      nsIFrame* rootFrame = shell->FrameManager()->GetRootFrame();
      if (rootFrame) {
        nsIWidget* widget =
          nsLayoutUtils::GetDisplayRootFrame(rootFrame)->GetWindow();
        if (widget) {
          nsRefPtr<LayerManager> manager = widget->GetLayerManager();
          return manager.forget();
        }
      }
    }
    aDoc = aDoc->GetParentDocument();
  }

  nsRefPtr<LayerManager> manager = new BasicLayerManager(nsnull);
  return manager.forget();
}

ImageContainer* nsHTMLMediaElement::GetImageContainer()
{
  if (mImageContainer)
    return mImageContainer;

  // If we have a print surface, this is just a static image so
  // no image container is required
  if (mPrintSurface)
    return nsnull;

  // Only video frames need an image container.
  nsCOMPtr<nsIDOMHTMLVideoElement> video =
    do_QueryInterface(static_cast<nsIContent*>(this));
  if (!video)
    return nsnull;

  nsRefPtr<LayerManager> manager = GetLayerManagerForDoc(GetOwnerDoc());
  if (!manager)
    return nsnull;

  mImageContainer = manager->CreateImageContainer();
  return mImageContainer;
}

nsresult nsHTMLMediaElement::DispatchSimpleEvent(const nsAString& aName)

@@ -70,7 +70,6 @@ nsMediaDecoder::nsMediaDecoder() :
  mProgressTime(),
  mDataTime(),
  mVideoUpdateLock(nsnull),
  mFramerate(0.0),
  mAspectRatio(1.0),
  mSizeChanged(PR_FALSE),
  mShuttingDown(PR_FALSE)

@@ -213,78 +212,20 @@ nsresult nsMediaDecoder::StopProgress()
  return rv;
}

void nsMediaDecoder::SetRGBData(PRInt32 aWidth, PRInt32 aHeight, float aFramerate,
                                float aAspectRatio, unsigned char* aRGBBuffer)
void nsMediaDecoder::SetVideoData(const gfxIntSize& aSize,
                                  float aAspectRatio,
                                  Image* aImage)
{
  nsAutoLock lock(mVideoUpdateLock);

  if (mRGBWidth != aWidth || mRGBHeight != aHeight ||
  if (mRGBWidth != aSize.width || mRGBHeight != aSize.height ||
      mAspectRatio != aAspectRatio) {
    mRGBWidth = aWidth;
    mRGBHeight = aHeight;
    mRGBWidth = aSize.width;
    mRGBHeight = aSize.height;
    mAspectRatio = aAspectRatio;
    mSizeChanged = PR_TRUE;
  }
  mFramerate = aFramerate;
  mRGB = aRGBBuffer;
}

void nsMediaDecoder::Paint(gfxContext* aContext,
                           gfxPattern::GraphicsFilter aFilter,
                           const gfxRect& aRect)
{
  nsAutoLock lock(mVideoUpdateLock);

  if (!mRGB)
    return;

  nsRefPtr<gfxImageSurface> imgSurface =
    new gfxImageSurface(mRGB,
                        gfxIntSize(mRGBWidth, mRGBHeight),
                        mRGBWidth * 4,
                        gfxASurface::ImageFormatRGB24);
  if (!imgSurface)
    return;

  nsRefPtr<gfxASurface> surface(imgSurface);

#if defined(XP_MACOSX)
  nsRefPtr<gfxQuartzImageSurface> quartzSurface =
    new gfxQuartzImageSurface(imgSurface);
  if (!quartzSurface)
    return;

  surface = quartzSurface;
#endif

  nsRefPtr<gfxPattern> pat = new gfxPattern(surface);
  if (!pat)
    return;

  // Make the source image fill the rectangle completely
  pat->SetMatrix(gfxMatrix().Scale(mRGBWidth/aRect.Width(), mRGBHeight/aRect.Height()));

  pat->SetFilter(aFilter);

  // Set PAD mode so that when the video is being scaled, we do not sample
  // outside the bounds of the video image.
  gfxPattern::GraphicsExtend extend = gfxPattern::EXTEND_PAD;

  // PAD is slow with X11 and Quartz surfaces, so prefer speed over correctness
  // and use NONE.
  nsRefPtr<gfxASurface> target = aContext->CurrentSurface();
  gfxASurface::gfxSurfaceType type = target->GetType();
  if (type == gfxASurface::SurfaceTypeXlib ||
      type == gfxASurface::SurfaceTypeXcb ||
      type == gfxASurface::SurfaceTypeQuartz) {
    extend = gfxPattern::EXTEND_NONE;
  if (mImageContainer && aImage) {
    mImageContainer->SetCurrentImage(aImage);
  }

  pat->SetExtend(extend);

  /* Draw RGB surface onto frame */
  aContext->NewPath();
  aContext->PixelSnappedRectangleAndSetPattern(aRect, pat);
  aContext->Fill();
}

@@ -46,19 +46,22 @@
#include "gfxContext.h"
#include "gfxRect.h"
#include "nsITimer.h"
#include "ImageLayers.h"

class nsHTMLMediaElement;
class nsMediaStream;
class nsIStreamListener;

// All methods of nsMediaDecoder must be called from the main thread only
// with the exception of SetRGBData and GetStatistics, which can be
// called from any thread.
// with the exception of GetImageContainer, SetVideoData and GetStatistics,
// which can be called from any thread.
class nsMediaDecoder : public nsIObserver
{
public:
  typedef mozilla::TimeStamp TimeStamp;
  typedef mozilla::TimeDuration TimeDuration;
  typedef mozilla::layers::ImageContainer ImageContainer;
  typedef mozilla::layers::Image Image;

  nsMediaDecoder();
  virtual ~nsMediaDecoder();

@@ -111,15 +114,6 @@ public:
  virtual nsresult Load(nsMediaStream* aStream,
                        nsIStreamListener **aListener) = 0;

  // Draw the latest video data. This is done
  // here instead of in nsVideoFrame so that the lock around the
  // RGB buffer doesn't have to be exposed publically.
  // The current video frame is drawn to fill aRect.
  // Called in the main thread only.
  virtual void Paint(gfxContext* aContext,
                     gfxPattern::GraphicsFilter aFilter,
                     const gfxRect& aRect);

  // Called when the video file has completed downloading.
  virtual void ResourceLoaded() = 0;

@@ -229,6 +223,11 @@ public:
  // their nsMediaStream.
  virtual void MoveLoadsToBackground()=0;

  // Gets the image container for the media element. Will return null if
  // the element is not a video element. This can be called from any
  // thread; ImageContainers can be used from any thread.
  ImageContainer* GetImageContainer() { return mImageContainer; }

protected:

  // Start timer to update download progress information.

@@ -237,15 +236,11 @@ protected:
  // Stop progress information timer.
  nsresult StopProgress();

  // Set the RGB width, height, pixel aspect ratio, and framerate.
  // Ownership of the passed RGB buffer is transferred to the decoder.
  // This is the only nsMediaDecoder method that may be called from
  // threads other than the main thread.
  void SetRGBData(PRInt32 aWidth,
                  PRInt32 aHeight,
                  float aFramerate,
                  float aAspectRatio,
                  unsigned char* aRGBBuffer);
  // Set the video width, height, pixel aspect ratio, and current image.
  // Ownership of the image is transferred to the decoder.
  void SetVideoData(const gfxIntSize& aSize,
                    float aAspectRatio,
                    Image* aImage);

protected:
  // Timer used for updating progress events

@@ -256,14 +251,11 @@ protected:
  // The decoder does not add a reference the element.
  nsHTMLMediaElement* mElement;

  // RGB data for last decoded frame of video data.
  // The size of the buffer is mRGBWidth*mRGBHeight*4 bytes and
  // contains bytes in RGBA format.
  nsAutoArrayPtr<unsigned char> mRGB;

  PRInt32 mRGBWidth;
  PRInt32 mRGBHeight;

  nsRefPtr<ImageContainer> mImageContainer;

  // Time that the last progress event was fired. Read/Write from the
  // main thread only.
  TimeStamp mProgressTime;

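For decoder authors, the practical difference in the header above is that SetRGBData and its RGB buffer plumbing are replaced by SetVideoData plus the shared ImageContainer. A minimal sketch of the new decoder-side sequence follows, condensed from the Ogg changes in the next file; the subclass and method names are hypothetical, the omitted overrides and error handling are elided, and a `using namespace mozilla::layers` is assumed as in the patched files.

// Illustrative only: compare nsOggDecodeStateMachine::PlayVideo below.
class HypotheticalDecoder : public nsMediaDecoder {
  // ... the usual nsMediaDecoder overrides are omitted in this sketch ...
  void DeliverFrame(const gfxIntSize& aFrameSize,
                    float aAspectRatio,
                    const PlanarYCbCrImage::Data& aYCbCrData)
  {
    ImageContainer* container = GetImageContainer(); // safe on any thread
    if (!container)
      return; // not a video element, or we're printing

    Image::Format format = Image::PLANAR_YCBCR;
    nsRefPtr<Image> image = container->CreateImage(&format, 1);
    if (!image)
      return;

    static_cast<PlanarYCbCrImage*>(image.get())->SetData(aYCbCrData);
    // Updates the cached size/aspect ratio and makes the image the
    // container's current frame; callable off the main thread.
    SetVideoData(aFrameSize, aAspectRatio, image);
  }
};
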
@@ -56,6 +56,7 @@

using mozilla::TimeDuration;
using mozilla::TimeStamp;
using namespace mozilla::layers;

#ifdef PR_LOGGING
static PRLogModuleInfo* gOggDecoderLog;

@@ -1015,35 +1016,63 @@ void nsOggDecodeStateMachine::PlayFrame() {
  }
}

static void ToARGBHook(const PlanarYCbCrImage::Data& aData, PRUint8* aOutput)
{
  OggPlayYUVChannels yuv;
  NS_ASSERTION(aData.mYStride == aData.mYSize.width,
               "Stride not supported");
  NS_ASSERTION(aData.mCbCrStride == aData.mCbCrSize.width,
               "Stride not supported");
  yuv.ptry = aData.mYChannel;
  yuv.ptru = aData.mCbChannel;
  yuv.ptrv = aData.mCrChannel;
  yuv.uv_width = aData.mCbCrSize.width;
  yuv.uv_height = aData.mCbCrSize.height;
  yuv.y_width = aData.mYSize.width;
  yuv.y_height = aData.mYSize.height;

  OggPlayRGBChannels rgb;
  rgb.ptro = aOutput;
  rgb.rgb_width = aData.mYSize.width;
  rgb.rgb_height = aData.mYSize.height;

  oggplay_yuv2bgra(&yuv, &rgb);
}

void nsOggDecodeStateMachine::PlayVideo(FrameData* aFrame)
{
  PR_ASSERT_CURRENT_THREAD_IN_MONITOR(mDecoder->GetMonitor());
  if (aFrame && aFrame->mVideoHeader) {
    OggPlayVideoData* videoData = oggplay_callback_info_get_video_data(aFrame->mVideoHeader);
    ImageContainer* container = mDecoder->GetImageContainer();
    // Currently our Ogg decoder only knows how to output to PLANAR_YCBCR
    // format.
    Image::Format format = Image::PLANAR_YCBCR;
    nsRefPtr<Image> image;
    if (container) {
      image = container->CreateImage(&format, 1);
    }
    if (image) {
      NS_ASSERTION(image->GetFormat() == Image::PLANAR_YCBCR,
                   "Wrong format?");
      PlanarYCbCrImage* videoImage = static_cast<PlanarYCbCrImage*>(image.get());

      OggPlayYUVChannels yuv;
      yuv.ptry = videoData->y;
      yuv.ptru = videoData->u;
      yuv.ptrv = videoData->v;
      yuv.uv_width = aFrame->mUVWidth;
      yuv.uv_height = aFrame->mUVHeight;
      yuv.y_width = aFrame->mVideoWidth;
      yuv.y_height = aFrame->mVideoHeight;
      // XXX this is only temporary until we get YUV code in the layer
      // system.
      videoImage->SetRGBConverter(ToARGBHook);

      size_t size = aFrame->mVideoWidth * aFrame->mVideoHeight * 4;
      nsAutoArrayPtr<unsigned char> buffer(new unsigned char[size]);
      if (!buffer)
        return;
      OggPlayVideoData* videoData = oggplay_callback_info_get_video_data(aFrame->mVideoHeader);
      PlanarYCbCrImage::Data data;
      data.mYChannel = videoData->y;
      data.mYSize = gfxIntSize(aFrame->mVideoWidth, aFrame->mVideoHeight);
      data.mYStride = data.mYSize.width;
      data.mCbChannel = videoData->u;
      data.mCrChannel = videoData->v;
      data.mCbCrSize = gfxIntSize(aFrame->mUVWidth, aFrame->mUVHeight);
      data.mCbCrStride = data.mCbCrSize.width;
      videoImage->SetData(data);

      OggPlayRGBChannels rgb;
      rgb.ptro = buffer;
      rgb.rgb_width = aFrame->mVideoWidth;
      rgb.rgb_height = aFrame->mVideoHeight;

      oggplay_yuv2bgra(&yuv, &rgb);

      mDecoder->SetRGBData(aFrame->mVideoWidth, aFrame->mVideoHeight,
                           mFramerate, mAspectRatio, buffer.forget());
      mDecoder->SetVideoData(data.mYSize, mAspectRatio, image);
    }

    // Don't play the frame's video data more than once.
    aFrame->ClearVideoHeader();

@@ -1871,7 +1900,8 @@ void nsOggDecodeStateMachine::LoadOggHeaders(nsChannelReader* aReader)
      int y_width;
      int y_height;
      oggplay_get_video_y_size(mPlayer, i, &y_width, &y_height);
      mDecoder->SetRGBData(y_width, y_height, mFramerate, mAspectRatio, nsnull);
      mDecoder->SetVideoData(gfxIntSize(y_width, y_height), mAspectRatio,
                             nsnull);
    }
    else if (mAudioTrack == -1 && oggplay_get_track_type(mPlayer, i) == OGGZ_CONTENT_VORBIS) {
      mAudioTrack = i;

@@ -2011,6 +2041,8 @@ PRBool nsOggDecoder::Init(nsHTMLMediaElement* aElement)
  mReader = new nsChannelReader();
  NS_ENSURE_TRUE(mReader, PR_FALSE);

  mImageContainer = aElement->GetImageContainer();

  return PR_TRUE;
}

@@ -89,6 +89,7 @@
#include "nsIImageLoadingContent.h"
#include "nsCOMPtr.h"
#include "nsListControlFrame.h"
#include "ImageLayers.h"

#ifdef MOZ_SVG
#include "nsSVGUtils.h"

@@ -97,6 +98,8 @@
#include "nsSVGOuterSVGFrame.h"
#endif

using namespace mozilla::layers;

/**
 * A namespace class for static layout utilities.
 */

@@ -3436,26 +3439,31 @@ nsLayoutUtils::SurfaceFromElement(nsIDOMElement *aElement,
  if (!principal)
    return result;

  PRUint32 w, h;
  rv = video->GetVideoWidth(&w);
  rv |= video->GetVideoHeight(&h);
  if (NS_FAILED(rv))
  ImageContainer *container = video->GetImageContainer();
  if (!container)
    return result;

  nsRefPtr<gfxASurface> surf;
  if (wantImageSurface) {
    surf = new gfxImageSurface(gfxIntSize(w, h), gfxASurface::ImageFormatARGB32);
  } else {
    surf = gfxPlatform::GetPlatform()->CreateOffscreenSurface(gfxIntSize(w, h), gfxASurface::ImageFormatARGB32);
  gfxIntSize size;
  nsRefPtr<gfxASurface> surf = container->GetCurrentAsSurface(&size);
  if (!surf)
    return result;

  if (wantImageSurface && surf->GetType() != gfxASurface::SurfaceTypeImage) {
    nsRefPtr<gfxImageSurface> imgSurf =
      new gfxImageSurface(size, gfxASurface::ImageFormatARGB32);
    if (!imgSurf)
      return result;

    nsRefPtr<gfxContext> ctx = new gfxContext(imgSurf);
    if (!ctx)
      return result;
    ctx->SetOperator(gfxContext::OPERATOR_SOURCE);
    ctx->DrawSurface(surf, size);
    surf = imgSurf;
  }

  nsRefPtr<gfxContext> ctx = new gfxContext(surf);

  ctx->SetOperator(gfxContext::OPERATOR_SOURCE);
  video->Paint(ctx, gfxPattern::FILTER_NEAREST, gfxRect(0, 0, w, h));

  result.mSurface = surf;
  result.mSize = gfxIntSize(w, h);
  result.mSize = size;
  result.mPrincipal = principal;
  result.mIsWriteOnly = PR_FALSE;

@@ -64,6 +64,8 @@
#include "nsIAccessibilityService.h"
#endif

using namespace mozilla::layers;

nsIFrame*
NS_NewHTMLVideoFrame(nsIPresShell* aPresShell, nsStyleContext* aContext)
{

@@ -172,25 +174,77 @@ CorrectForAspectRatio(const gfxRect& aRect, const nsIntSize& aRatio)
  return gfxRect(aRect.TopLeft() + topLeft, scaledRatio);
}

void
nsVideoFrame::PaintVideo(nsIRenderingContext& aRenderingContext,
                         const nsRect& aDirtyRect, nsPoint aPt)
already_AddRefed<Layer>
nsVideoFrame::BuildLayer(nsDisplayListBuilder* aBuilder,
                         LayerManager* aManager)
{
  nsRect area = GetContentRect() - GetPosition() + aPt;
  nsRect area = GetContentRect() + aBuilder->ToReferenceFrame(GetParent());
  nsHTMLVideoElement* element = static_cast<nsHTMLVideoElement*>(GetContent());
  nsIntSize videoSize = element->GetVideoSize(nsIntSize(0, 0));
  if (videoSize.width <= 0 || videoSize.height <= 0 || area.IsEmpty())
    return;
    return nsnull;

  gfxContext* ctx = aRenderingContext.ThebesContext();
  nsRefPtr<ImageContainer> container = element->GetImageContainer();
  // If we have a container with the right layer manager already, we don't
  // need to do anything here. Otherwise we need to set up a temporary
  // ImageContainer, capture the video data and store it in the temp
  // container.
  if (!container || container->Manager() != aManager) {
    nsRefPtr<ImageContainer> tmpContainer = aManager->CreateImageContainer();
    if (!tmpContainer)
      return nsnull;

    // We get a reference to the video data as a cairo surface.
    CairoImage::Data cairoData;
    nsRefPtr<gfxASurface> imageSurface;
    if (container) {
      // Get video from the existing container. It was created for a
      // different layer manager, so we do fallback through cairo.
      imageSurface = container->GetCurrentAsSurface(&cairoData.mSize);
      cairoData.mSurface = imageSurface;
    } else {
      // We're probably printing.
      cairoData.mSurface = element->GetPrintSurface();
      if (!cairoData.mSurface)
        return nsnull;
      cairoData.mSize = gfxIntSize(videoSize.width, videoSize.height);
    }

    // Now create a CairoImage to display the surface.
    Image::Format cairoFormat = Image::CAIRO_SURFACE;
    nsRefPtr<Image> image = tmpContainer->CreateImage(&cairoFormat, 1);
    if (!image)
      return nsnull;

    NS_ASSERTION(image->GetFormat() == cairoFormat, "Wrong format");
    static_cast<CairoImage*>(image.get())->SetData(cairoData);
    tmpContainer->SetCurrentImage(image);
    container = tmpContainer.forget();
  }

  // Compute the rectangle in which to paint the video. We need to use
  // the largest rectangle that fills our content-box and has the
  // correct aspect ratio.
  nsPresContext* presContext = PresContext();
  gfxRect r = gfxRect(presContext->AppUnitsToGfxUnits(area.x),
                      presContext->AppUnitsToGfxUnits(area.y),
                      presContext->AppUnitsToGfxUnits(area.width),
                      presContext->AppUnitsToGfxUnits(area.height));

  r = CorrectForAspectRatio(r, videoSize);
  element->Paint(ctx, nsLayoutUtils::GetGraphicsFilterForFrame(this), r);

  nsRefPtr<ImageLayer> layer = aManager->CreateImageLayer();
  if (!layer)
    return nsnull;

  layer->SetContainer(container);
  layer->SetFilter(nsLayoutUtils::GetGraphicsFilterForFrame(this));
  // Set a transform on the layer to draw the video in the right place
  gfxMatrix transform;
  transform.Translate(r.pos);
  transform.Scale(r.Width()/videoSize.width, r.Height()/videoSize.height);
  layer->SetTransform(gfx3DMatrix::From2D(transform));
  nsRefPtr<Layer> result = layer.forget();
  return result.forget();
}

NS_IMETHODIMP

@@ -274,20 +328,41 @@ nsVideoFrame::Reflow(nsPresContext* aPresContext,
  return NS_OK;
}

static void PaintVideo(nsIFrame* aFrame, nsIRenderingContext* aCtx,
                       const nsRect& aDirtyRect, nsPoint aPt)
{
#if 0
  double start = double(PR_IntervalToMilliseconds(PR_IntervalNow()))/1000.0;
class nsDisplayVideo : public nsDisplayItem {
public:
  nsDisplayVideo(nsVideoFrame* aFrame)
    : nsDisplayItem(aFrame)
  {
    MOZ_COUNT_CTOR(nsDisplayVideo);
  }
#ifdef NS_BUILD_REFCNT_LOGGING
  virtual ~nsDisplayVideo() {
    MOZ_COUNT_DTOR(nsDisplayVideo);
  }
#endif

  NS_DISPLAY_DECL_NAME("Video")

  static_cast<nsVideoFrame*>(aFrame)->PaintVideo(*aCtx, aDirtyRect, aPt);
#if 0
  double end = double(PR_IntervalToMilliseconds(PR_IntervalNow()))/1000.0;
  printf("PaintVideo: %f\n", (float)end - (float)start);
  // It would be great if we could override IsOpaque to return false here,
  // but it's probably not safe to do so in general. Video frames are
  // updated asynchronously from decoder threads, and it's possible that
  // we might have an opaque video frame when IsOpaque is called, but
  // when we come to paint, the video frame is transparent or has gone
  // away completely (e.g. because of a decoder error). The problem would
  // be especially acute if we have off-main-thread rendering.

#endif
}
  virtual nsRect GetBounds(nsDisplayListBuilder* aBuilder)
  {
    nsIFrame* f = GetUnderlyingFrame();
    return f->GetContentRect() + aBuilder->ToReferenceFrame(f->GetParent());
  }

  virtual already_AddRefed<Layer> BuildLayer(nsDisplayListBuilder* aBuilder,
                                             LayerManager* aManager)
  {
    return static_cast<nsVideoFrame*>(mFrame)->BuildLayer(aBuilder, aManager);
  }
};

NS_IMETHODIMP
nsVideoFrame::BuildDisplayList(nsDisplayListBuilder* aBuilder,

@@ -302,9 +377,9 @@ nsVideoFrame::BuildDisplayList(nsDisplayListBuilder* aBuilder,
  nsresult rv = DisplayBorderBackgroundOutline(aBuilder, aLists);
  NS_ENSURE_SUCCESS(rv, rv);

  if (!ShouldDisplayPoster() && HasVideoData()) {
  if (HasVideoElement() && !ShouldDisplayPoster()) {
    rv = aLists.Content()->AppendNewToTop(
      new (aBuilder) nsDisplayGeneric(this, ::PaintVideo, "Video"));
      new (aBuilder) nsDisplayVideo(this));
    NS_ENSURE_SUCCESS(rv, rv);
  }

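Note that the layer never carries scaled pixels: BuildLayer above leaves the frame at its native size in the ImageContainer and positions it purely with a layer transform. For instance, with the 140x100 test video in the reftests below displayed at width:400px, the aspect-corrected rect is roughly 400x286 gfx units and the scale is about 2.86 on each axis. A commented restatement of that placement logic (variable names match the BuildLayer code above; the numbers are an illustrative example, not part of the patch):

// r is the aspect-corrected destination rect in gfx units; videoSize is the
// native video size. The compositor applies the scale, so no pixels are
// copied on the main thread.
gfxMatrix transform;
transform.Translate(r.pos);                      // top-left of the content rect
transform.Scale(r.Width() / videoSize.width,     // e.g. ~400/140 ≈ 2.86
                r.Height() / videoSize.height);  // e.g. ~286/100 ≈ 2.86
layer->SetTransform(gfx3DMatrix::From2D(transform));
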
@@ -49,12 +49,17 @@
#include "nsITimer.h"
#include "nsTArray.h"
#include "nsIAnonymousContentCreator.h"
#include "Layers.h"
#include "ImageLayers.h"

nsIFrame* NS_NewVideoFrame (nsIPresShell* aPresShell, nsStyleContext* aContext);

class nsVideoFrame : public nsContainerFrame, public nsIAnonymousContentCreator
{
public:
  typedef mozilla::layers::Layer Layer;
  typedef mozilla::layers::LayerManager LayerManager;

  nsVideoFrame(nsStyleContext* aContext);

  NS_DECL_QUERYFRAME

@@ -112,6 +117,9 @@ public:
  NS_IMETHOD GetFrameName(nsAString& aResult) const;
#endif

  already_AddRefed<Layer> BuildLayer(nsDisplayListBuilder* aBuilder,
                                     LayerManager* aManager);

protected:

  // Returns PR_TRUE if we're rendering for a video element. We still create

@@ -0,0 +1,9 @@
<!DOCTYPE HTML>
<html>
<body style="background:white">
<div style="position:absolute; left:0; top:0; width:200px; height:600px;">
  <video src="black140x100.ogv" style="width:400px; margin-left:-100px;"></video>
</div>
<div style="position:absolute; left:200px; top:0; background:white; width:200px; height:600px;"></div>
</body>
</html>

@@ -0,0 +1,8 @@
<!DOCTYPE HTML>
<html>
<body style="background:white">
<div style="overflow:hidden; position:absolute; left:0; top:0; width:200px; height:600px;">
  <video src="black140x100.ogv" style="width:400px; margin-left:-100px;"></video>
</div>
</body>
</html>

@@ -7,6 +7,7 @@ HTTP(..) == aspect-ratio-3b.xhtml aspect-ratio-3-ref.xhtml
HTTP(..) == basic-1.xhtml basic-1-ref.html
HTTP(..) == canvas-1a.xhtml basic-1-ref.html
HTTP(..) == canvas-1b.xhtml basic-1-ref.html
== clipping-1a.html clipping-1-ref.html
== empty-1a.html empty-1-ref.html
== empty-1b.html empty-1-ref.html
random HTTP(..) == object-aspect-ratio-1a.xhtml aspect-ratio-1-ref.html