MozReview-Commit-ID: AGZ8pI8vmAs
Kartikaya Gupta 2017-02-06 11:53:47 -05:00
Parent 85742c8431 83e2937963
Commit cfd6b031e0
174 changed files with 19353 additions and 21427 deletions

View file

@ -3,9 +3,9 @@
const BASE = getRootDirectory(gTestPath)
.replace("chrome://mochitests/content/", "https://example.com/");
const PAGE = `${BASE}/file_install_extensions.html`;
const PERMS_XPI = `${BASE}/browser_webext_permissions.xpi`;
const NO_PERMS_XPI = `${BASE}/browser_webext_nopermissions.xpi`;
const INSTALL_PAGE = `${BASE}/file_install_extensions.html`;
const PERMS_XPI = "browser_webext_permissions.xpi";
const NO_PERMS_XPI = "browser_webext_nopermissions.xpi";
const ID = "permissions@test.mozilla.org";
const DEFAULT_EXTENSION_ICON = "chrome://browser/content/extension.svg";
@ -36,13 +36,13 @@ function promiseGetAddonByID(id) {
});
}
function checkNotification(panel, url) {
function checkNotification(panel, filename) {
let icon = panel.getAttribute("icon");
let ul = document.getElementById("addon-webext-perm-list");
let header = document.getElementById("addon-webext-perm-intro");
if (url == PERMS_XPI) {
if (filename == PERMS_XPI) {
// The icon should come from the extension. Don't bother with the precise
// path; just make sure we've got a jar url pointing to the right path
// inside the jar.
@ -52,7 +52,7 @@ function checkNotification(panel, url) {
is(header.getAttribute("hidden"), "", "Permission list header is visible");
is(ul.childElementCount, 4, "Permissions list has 4 entries");
// Real checking of the contents here is deferred until bug 1316996 lands
} else if (url == NO_PERMS_XPI) {
} else if (filename == NO_PERMS_XPI) {
// This extension has no icon, it should have the default
is(icon, DEFAULT_EXTENSION_ICON, "Icon is the default extension icon");
@ -61,18 +61,48 @@ function checkNotification(panel, url) {
}
}
// Navigate the current tab to the given url and return a Promise
// that resolves when the page is loaded.
function load(url) {
gBrowser.selectedBrowser.loadURI(url);
return BrowserTestUtils.browserLoaded(gBrowser.selectedBrowser);
}
const INSTALL_FUNCTIONS = [
function installMozAM(url) {
return ContentTask.spawn(gBrowser.selectedBrowser, url, function*(cUrl) {
yield content.wrappedJSObject.installMozAM(cUrl);
async function installMozAM(filename) {
await load(INSTALL_PAGE);
await ContentTask.spawn(gBrowser.selectedBrowser, `${BASE}/${filename}`, function*(url) {
yield content.wrappedJSObject.installMozAM(url);
});
},
function installTrigger(url) {
ContentTask.spawn(gBrowser.selectedBrowser, url, function*(cUrl) {
content.wrappedJSObject.installTrigger(cUrl);
async function installTrigger(filename) {
await load(INSTALL_PAGE);
ContentTask.spawn(gBrowser.selectedBrowser, `${BASE}/${filename}`, function*(url) {
content.wrappedJSObject.installTrigger(url);
});
return Promise.resolve();
},
async function installFile(filename) {
const ChromeRegistry = Cc["@mozilla.org/chrome/chrome-registry;1"]
.getService(Ci.nsIChromeRegistry);
let chromeUrl = Services.io.newURI(gTestPath);
let fileUrl = ChromeRegistry.convertChromeURL(chromeUrl);
let file = fileUrl.QueryInterface(Ci.nsIFileURL).file;
file.leafName = filename;
let MockFilePicker = SpecialPowers.MockFilePicker;
MockFilePicker.init(window);
MockFilePicker.returnFiles = [file];
await BrowserOpenAddonsMgr("addons://list/extension");
let contentWin = gBrowser.selectedTab.linkedBrowser.contentWindow;
// Do the install...
contentWin.gViewController.doCommand("cmd_installFromFile");
MockFilePicker.cleanup();
},
];
@ -85,8 +115,8 @@ add_task(function* () {
["extensions.webextPermissionPrompts", true],
]});
function* runOnce(installFn, url, cancel) {
let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, PAGE);
function* runOnce(installFn, filename, cancel) {
let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser);
let installPromise = new Promise(resolve => {
let listener = {
@ -118,10 +148,10 @@ add_task(function* () {
AddonManager.addInstallListener(listener);
});
let installMethodPromise = installFn(url);
let installMethodPromise = installFn(filename);
let panel = yield promisePopupNotificationShown("addon-webext-permissions");
checkNotification(panel, url);
checkNotification(panel, filename);
if (cancel) {
panel.secondaryButton.click();
@ -140,7 +170,6 @@ add_task(function* () {
yield installMethodPromise;
}
let result = yield installPromise;
let addon = yield promiseGetAddonByID(ID);
if (cancel) {

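The hunk above is truncated, but the harness shape follows from the signatures already shown: each entry of INSTALL_FUNCTIONS is driven through runOnce with an XPI filename and a cancel flag. A hedged sketch of that driver loop, assuming only the visible signatures (not the actual test body):

for (let installFn of INSTALL_FUNCTIONS) {
  // Hypothetical: exercise each install path twice, first cancelling the
  // permission prompt, then accepting it.
  yield runOnce(installFn, NO_PERMS_XPI, true);
  yield runOnce(installFn, PERMS_XPI, false);
}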
View file

@ -815,7 +815,7 @@ muteTab.accesskey = M
unmuteTab.label = Unmute Tab
unmuteTab.accesskey = M
playTab.label = Play Tab
playTab.accesskey = P
playTab.accesskey = l
# LOCALIZATION NOTE (weakCryptoOverriding.message): %S is brandShortName
weakCryptoOverriding.message = %S recommends that you don't enter your password, credit card and other personal information on this website.

View file

@ -919,9 +919,6 @@ endif
ifdef MOZ_CARGO_SUPPORTS_FROZEN
cargo_build_flags += --frozen
endif
ifdef MOZ_ENABLE_WEBRENDER
cargo_build_flags += --features "quantum_render"
endif
cargo_build_flags += --manifest-path $(CARGO_FILE)
ifdef BUILD_VERBOSE_LOG

View file

@ -749,3 +749,7 @@ netmonitor.custom.cancel=Cancel
# LOCALIZATION NOTE (netmonitor.backButton): This is the label displayed
# on the button which exits the performance statistics view
netmonitor.backButton=Back
# LOCALIZATION NOTE (netmonitor.headers.learnMore): This is the label displayed
# next to a header list item, with a link to external documentation
netmonitor.headers.learnMore=Learn More

View file

@ -0,0 +1,119 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
"use strict";
/**
* A mapping of header names to external documentation. Any header included
* here will show a "Learn More" link alongside it.
*/
var URL_DOMAIN = "https://developer.mozilla.org";
const URL_PATH = "/en-US/docs/Web/HTTP/Headers/";
const URL_PARAMS =
"?utm_source=mozilla&utm_medium=devtools-netmonitor&utm_campaign=default";
var SUPPORTED_HEADERS = [
"Accept",
"Accept-Charset",
"Accept-Encoding",
"Accept-Language",
"Accept-Ranges",
"Access-Control-Allow-Credentials",
"Access-Control-Allow-Headers",
"Access-Control-Allow-Methods",
"Access-Control-Allow-Origin",
"Access-Control-Expose-Headers",
"Access-Control-Max-Age",
"Access-Control-Request-Headers",
"Access-Control-Request-Method",
"Age",
"Cache-Control",
"Connection",
"Content-Disposition",
"Content-Encoding",
"Content-Language",
"Content-Length",
"Content-Location",
"Content-Security-Policy",
"Content-Security-Policy-Report-Only",
"Content-Type",
"Cookie",
"Cookie2",
"DNT",
"Date",
"ETag",
"Expires",
"From",
"Host",
"If-Match",
"If-Modified-Since",
"If-None-Match",
"If-Range",
"If-Unmodified-Since",
"Keep-Alive",
"Last-Modified",
"Location",
"Origin",
"Pragma",
"Public-Key-Pins",
"Public-Key-Pins-Report-Only",
"Referer",
"Referrer-Policy",
"Retry-After",
"Server",
"Set-Cookie",
"Set-Cookie2",
"Strict-Transport-Security",
"TE",
"Tk",
"Trailer",
"Transfer-Encoding",
"Upgrade-Insecure-Requests",
"User-Agent",
"Vary",
"Via",
"Warning",
"X-Content-Type-Options",
"X-DNS-Prefetch-Control",
"X-Frame-Options",
"X-XSS-Protection"
];
/**
* Get the MDN URL for the specified header
*
* @param {string} header
* The name of the header.
*
* @return {string}
* The MDN URL for the header, or null if not available.
*/
exports.getURL = (header) => {
if (SUPPORTED_HEADERS.indexOf(header) === -1) {
return null;
}
return URL_DOMAIN + URL_PATH + header + URL_PARAMS;
};
/**
* Use a different domain for the URLs. Used only for testing.
*
* @param {string} domain
* The domain to use.
*/
exports.setDomain = (domain) => {
URL_DOMAIN = domain;
};
/**
* Use a different list of supported headers. Used only for testing.
*
* @param {array} headers
* The supported headers to use.
*/
exports.setSupportedHeaders = (headers) => {
SUPPORTED_HEADERS = headers;
};
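For illustration, a minimal sketch of the module's contract from a consumer's side (the require path matches the DevToolsModules registration below; the returned string is simply URL_DOMAIN + URL_PATH + header + URL_PARAMS):

const HeadersMDN = require("devtools/client/netmonitor/shared/components/headers-mdn");
// A supported header yields a full MDN URL with tracking params:
HeadersMDN.getURL("Content-Type");
// -> "https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type?utm_source=mozilla&utm_medium=devtools-netmonitor&utm_campaign=default"
// Anything not in SUPPORTED_HEADERS yields null:
HeadersMDN.getURL("X-Example-Custom-Header"); // -> null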

View file

@ -15,11 +15,16 @@ const {
const { L10N } = require("../../l10n");
const { writeHeaderText } = require("../../request-utils");
const { getFormattedSize } = require("../../utils/format-utils");
const Services = require("Services");
const { gDevTools } = require("devtools/client/framework/devtools");
const HeadersMDN = require("devtools/client/netmonitor/shared/components/headers-mdn");
const { REPS, MODE } = require("devtools/client/shared/components/reps/load-reps");
const Rep = createFactory(REPS.Rep);
// Components
const PropertiesView = createFactory(require("./properties-view"));
const { div, input, textarea } = DOM;
const { a, div, input, textarea } = DOM;
const EDIT_AND_RESEND = L10N.getStr("netmonitor.summary.editAndResend");
const RAW_HEADERS = L10N.getStr("netmonitor.summary.rawHeaders");
const RAW_HEADERS_REQUEST = L10N.getStr("netmonitor.summary.rawHeaders.requestHeaders");
@ -45,6 +50,7 @@ const HeadersPanel = createClass({
propTypes: {
cloneSelectedRequest: PropTypes.func.isRequired,
request: PropTypes.object.isRequired,
renderValue: PropTypes.func
},
getInitialState() {
@ -213,10 +219,49 @@ const HeadersPanel = createClass({
object,
filterPlaceHolder: HEADERS_FILTER_TEXT,
sectionNames: Object.keys(object),
renderValue
}),
)
);
}
});
function onLearnMoreClick(e, headerDocURL) {
e.stopPropagation();
e.preventDefault();
let win = Services.wm.getMostRecentWindow(gDevTools.chromeWindowType);
win.openUILinkIn(headerDocURL, "tab");
}
function renderValue(props) {
const { member, value } = props;
if (typeof value !== "string") {
return null;
}
let headerDocURL = HeadersMDN.getURL(member.name);
return (
div({ className: "treeValueCellDivider" },
Rep(Object.assign(props, {
// FIXME: A workaround for the issue in StringRep
// Force StringRep to crop the text every time
member: Object.assign({}, member, { open: false }),
mode: MODE.TINY,
cropLimit: 60,
})),
headerDocURL ?
a({
className: "learn-more-link",
title: headerDocURL,
onClick: (e) => onLearnMoreClick(e, headerDocURL),
}, `[${L10N.getStr("netmonitor.headers.learnMore")}]`)
:
null
)
);
}
module.exports = HeadersPanel;
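Note on the contract: renderValue receives the props for a single value cell ({ member, value, ... }) and returns a React element, or null for non-string values; presumably the tree view then falls back to its default rendering for those cells (an inference from the optional renderValue propType above — the fallback path itself is not part of this diff).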

View file

@ -6,6 +6,7 @@ DevToolsModules(
'cookies-panel.js',
'details-panel.js',
'editor.js',
'headers-mdn.js',
'headers-panel.js',
'params-panel.js',
'preview-panel.js',

View file

@ -98,6 +98,7 @@ skip-if = (os == 'linux' && bits == 32 && debug) # bug 1328915, disable linux32
[browser_net_cyrillic-01.js]
[browser_net_cyrillic-02.js]
[browser_net_frame.js]
[browser_net_header-docs.js]
skip-if = (os == 'linux' && debug && bits == 32) # Bug 1321434
[browser_net_filter-01.js]
skip-if = (os == 'linux' && debug && bits == 32) # Bug 1303439

View file

@ -0,0 +1,56 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
"use strict";
const HeadersMDN = require("devtools/client/netmonitor/shared/components/headers-mdn");
/**
* Tests if "Learn More" links are correctly displayed
* next to headers.
*/
add_task(function* () {
let { tab, monitor } = yield initNetMonitor(POST_DATA_URL);
info("Starting test... ");
let { document, NetMonitorView } = monitor.panelWin;
let { RequestsMenu } = NetMonitorView;
RequestsMenu.lazyUpdate = false;
let wait = waitForNetworkEvents(monitor, 0, 2);
yield ContentTask.spawn(tab.linkedBrowser, {}, function* () {
content.wrappedJSObject.performRequests();
});
yield wait;
let origItem = RequestsMenu.getItemAtIndex(0);
RequestsMenu.selectedItem = origItem;
EventUtils.sendMouseEvent({ type: "click" },
document.querySelectorAll(".request-list-item")[0]);
testShowLearnMore(origItem);
return teardown(monitor);
/*
* Tests that a "Learn More" button is shown if
* and only if a header is documented in MDN.
*/
function testShowLearnMore(data) {
document.querySelectorAll(".properties-view .treeRow.stringRow").forEach((rowEl, index) => {
let headerName = rowEl.querySelectorAll(".treeLabelCell .treeLabel")[0].textContent;
let headerDocURL = HeadersMDN.getURL(headerName);
let learnMoreEl = rowEl.querySelectorAll(".treeValueCell .learn-more-link");
if (headerDocURL === null) {
ok(learnMoreEl.length === 0,
"undocumented header does not include a \"Learn More\" button");
} else {
ok(learnMoreEl[0].getAttribute("title") === headerDocURL,
"documented header includes a \"Learn More\" button with a link to MDN");
}
});
}
});

View file

@ -77,6 +77,23 @@
display: none;
}
.treeTable .treeValueCellDivider {
display: flex;
flex-wrap: wrap;
justify-content: space-between;
}
/* Learn More link */
.treeTable .treeValueCell .learn-more-link {
color: var(--theme-highlight-blue);
cursor: pointer;
margin: 0 5px;
}
.treeTable .treeValueCell .learn-more-link:hover {
text-decoration: underline;
}
/******************************************************************************/
/* Toggle Icon */

View file

@ -67,24 +67,28 @@ namespace {
// The number of queued runnables within the TabGroup ThrottledEventQueue
// at which to begin applying back pressure to the window.
const uint32_t kThrottledEventQueueBackPressure = 5000;
#define DEFAULT_THROTTLED_EVENT_QUEUE_BACK_PRESSURE 5000
static uint32_t gThrottledEventQueueBackPressure;
// The amount of delay to apply to timers when back pressure is triggered.
// As the length of the ThrottledEventQueue grows delay is increased. The
// delay is scaled such that every kThrottledEventQueueBackPressure runnables
// in the queue equates to an additional kBackPressureDelayMS.
const double kBackPressureDelayMS = 500;
#define DEFAULT_BACK_PRESSURE_DELAY_MS 250
static uint32_t gBackPressureDelayMS;
// This defines a limit for how much the delay must drop before we actually
// reduce back pressure throttle amount. This makes the throttle delay
// a bit "sticky" once we enter back pressure.
const double kBackPressureDelayReductionThresholdMS = 400;
#define DEFAULT_BACK_PRESSURE_DELAY_REDUCTION_THRESHOLD_MS 1000
static uint32_t gBackPressureDelayReductionThresholdMS;
// The minimum delay we can reduce back pressure to before we just floor
// the value back to zero. This allows us to ensure that we can exit
// back pressure even if there is always a small number of runnables
// queued up.
const double kBackPressureDelayMinimumMS = 100;
#define DEFAULT_BACK_PRESSURE_DELAY_MINIMUM_MS 100
static uint32_t gBackPressureDelayMinimumMS;
// Convert a ThrottledEventQueue length to a timer delay in milliseconds.
// This will return a value between 0 and INT32_MAX.
@ -92,8 +96,8 @@ int32_t
CalculateNewBackPressureDelayMS(uint32_t aBacklogDepth)
{
double multiplier = static_cast<double>(aBacklogDepth) /
static_cast<double>(kThrottledEventQueueBackPressure);
double value = kBackPressureDelayMS * multiplier;
static_cast<double>(gThrottledEventQueueBackPressure);
double value = static_cast<double>(gBackPressureDelayMS) * multiplier;
// Avoid overflow
if (value > INT32_MAX) {
value = INT32_MAX;
@ -102,7 +106,7 @@ CalculateNewBackPressureDelayMS(uint32_t aBacklogDepth)
// Once we get close to an empty queue just floor the delay back to zero.
// We want to ensure we don't get stuck in a condition where there is a
// small amount of delay remaining due to an active, but reasonable, queue.
else if (value < kBackPressureDelayMinimumMS) {
else if (value < static_cast<double>(gBackPressureDelayMinimumMS)) {
value = 0;
}
return static_cast<int32_t>(value);
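A worked example with the new defaults: a backlog of 10000 runnables gives a multiplier of 10000 / 5000 = 2.0, hence a delay of 2.0 * 250 = 500 ms; conversely, once the backlog drains below 5000 * (100 / 250) = 2000 runnables, the computed delay falls under the 100 ms minimum and is floored to zero (using DEFAULT_THROTTLED_EVENT_QUEUE_BACK_PRESSURE = 5000, DEFAULT_BACK_PRESSURE_DELAY_MS = 250, and DEFAULT_BACK_PRESSURE_DELAY_MINIMUM_MS = 100 from the hunk below).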
@ -153,6 +157,19 @@ TimeoutManager::Initialize()
Preferences::AddBoolVarCache(&gAnnotateTrackingChannels,
"privacy.trackingprotection.annotate_channels",
false);
Preferences::AddUintVarCache(&gThrottledEventQueueBackPressure,
"dom.timeout.throttled_event_queue_back_pressure",
DEFAULT_THROTTLED_EVENT_QUEUE_BACK_PRESSURE);
Preferences::AddUintVarCache(&gBackPressureDelayMS,
"dom.timeout.back_pressure_delay_ms",
DEFAULT_BACK_PRESSURE_DELAY_MS);
Preferences::AddUintVarCache(&gBackPressureDelayReductionThresholdMS,
"dom.timeout.back_pressure_delay_reduction_threshold_ms",
DEFAULT_BACK_PRESSURE_DELAY_REDUCTION_THRESHOLD_MS);
Preferences::AddUintVarCache(&gBackPressureDelayMinimumMS,
"dom.timeout.back_pressure_delay_minimum_ms",
DEFAULT_BACK_PRESSURE_DELAY_MINIMUM_MS);
}
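Since these are Uint var caches, each value now tracks its dom.timeout.* pref at runtime, so the back-pressure tuning above can be adjusted without rebuilding; the old compile-time constants survive only as the DEFAULT_* fallbacks.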
uint32_t
@ -654,7 +671,7 @@ TimeoutManager::MaybeApplyBackPressure()
// rarely fire under normal circumstances. It's low enough, though,
// that we should have time to slow the addition of new runnables before an
// OOM occurs.
if (queue->Length() < kThrottledEventQueueBackPressure) {
if (queue->Length() < gThrottledEventQueueBackPressure) {
return;
}
@ -712,8 +729,8 @@ TimeoutManager::CancelOrUpdateBackPressure(nsGlobalWindow* aWindow)
// can be quite expensive. We only want to call that method if the back log
// is really clearing.
else if (newBackPressureDelayMS == 0 ||
(newBackPressureDelayMS <=
(mBackPressureDelayMS - kBackPressureDelayReductionThresholdMS))) {
(static_cast<uint32_t>(mBackPressureDelayMS) >
(newBackPressureDelayMS + gBackPressureDelayReductionThresholdMS))) {
int32_t oldBackPressureDelayMS = mBackPressureDelayMS;
mBackPressureDelayMS = newBackPressureDelayMS;

View file

@ -612,9 +612,11 @@ IdleRequestExecutor::SetDeadline(TimeStamp aDeadline)
void
IdleRequestExecutor::MaybeDispatch()
{
MOZ_DIAGNOSTIC_ASSERT(mWindow);
if (mDispatched) {
// If we've already dispatched the executor we don't want to do it
// again. Also, if we've called IdleRequestExecutor::Cancel mWindow
// will be null, which indicates that we shouldn't dispatch this
// executor either.
if (mDispatched || !mWindow) {
return;
}

View file

@ -5134,7 +5134,7 @@ public:
~AutoSavepoint();
nsresult
Start(const TransactionBase* aConnection);
Start(const TransactionBase* aTransaction);
nsresult
Commit();

View file

@ -17,14 +17,13 @@
#include "nsDataHashtable.h"
#include "nsThreadUtils.h"
namespace mozilla
{
namespace mozilla {
namespace layers
{
class ImageContainer;
class KnowsCompositor;
namespace layers {
class ImageContainer;
class KnowsCompositor;
} // namespace layers
class AbstractThread;
class MediaResource;
class ReentrantMonitor;
@ -35,7 +34,8 @@ class GMPCrashHelper;
typedef nsDataHashtable<nsCStringHashKey, nsCString> MetadataTags;
static inline bool IsCurrentThread(nsIThread* aThread) {
static inline bool IsCurrentThread(nsIThread* aThread)
{
return NS_GetCurrentThread() == aThread;
}
@ -59,7 +59,10 @@ public:
// Can be called on any thread.
virtual void NotifyDecodedFrames(const FrameStatisticsData& aStats) = 0;
virtual AbstractCanonical<media::NullableTimeUnit>* CanonicalDurationOrNull() { return nullptr; };
virtual AbstractCanonical<media::NullableTimeUnit>* CanonicalDurationOrNull()
{
return nullptr;
};
// Return an event that will be notified when data arrives in MediaResource.
// MediaDecoderReader will register with this event to receive notifications
@ -74,7 +77,8 @@ public:
// and we might have a new compositor. If this new compositor requires us to
// recreate our decoders, then we expect the existing decoders to return an
// error independently of this.
virtual MediaEventSource<RefPtr<layers::KnowsCompositor>>* CompositorUpdatedEvent()
virtual MediaEventSource<RefPtr<layers::KnowsCompositor>>*
CompositorUpdatedEvent()
{
return nullptr;
}
@ -82,7 +86,7 @@ public:
// Notify the media decoder that a decryption key is required before emitting
// further output. This only needs to be overridden for decoders that expect
// encryption, such as the MediaSource decoder.
virtual void NotifyWaitingForKey() {}
virtual void NotifyWaitingForKey() { }
// Return an event that will be notified when a decoder is waiting for a
// decryption key before it can return more output.
@ -95,13 +99,12 @@ public:
virtual AbstractThread* AbstractMainThread() const = 0;
protected:
virtual void UpdateEstimatedMediaDuration(int64_t aDuration) {};
virtual void UpdateEstimatedMediaDuration(int64_t aDuration) { };
public:
void DispatchUpdateEstimatedMediaDuration(int64_t aDuration)
{
NS_DispatchToMainThread(NewRunnableMethod<int64_t>(this,
&AbstractMediaDecoder::UpdateEstimatedMediaDuration,
aDuration));
NS_DispatchToMainThread(NewRunnableMethod<int64_t>(
this, &AbstractMediaDecoder::UpdateEstimatedMediaDuration, aDuration));
}
virtual VideoFrameContainer* GetVideoFrameContainer() = 0;
@ -112,19 +115,22 @@ public:
virtual MediaDecoderOwner* GetOwner() const = 0;
// Set by Reader if the current audio track can be offloaded
virtual void SetPlatformCanOffloadAudio(bool aCanOffloadAudio) {}
virtual void SetPlatformCanOffloadAudio(bool aCanOffloadAudio) { }
virtual already_AddRefed<GMPCrashHelper> GetCrashHelper() { return nullptr; }
// Stack based class to assist in notifying the frame statistics of
// parsed and decoded frames. Use inside video demux & decode functions
// to ensure all parsed and decoded frames are reported on all return paths.
class AutoNotifyDecoded {
class AutoNotifyDecoded
{
public:
explicit AutoNotifyDecoded(AbstractMediaDecoder* aDecoder)
: mDecoder(aDecoder)
{}
~AutoNotifyDecoded() {
{
}
~AutoNotifyDecoded()
{
if (mDecoder) {
mDecoder->NotifyDecodedFrames(mStats);
}
@ -138,8 +144,12 @@ public:
// Classes directly inheriting from AbstractMediaDecoder do not support
// Observe and it should never be called directly.
NS_IMETHOD Observe(nsISupports *aSubject, const char * aTopic, const char16_t * aData) override
{ MOZ_CRASH("Forbidden method"); return NS_OK; }
NS_IMETHOD Observe(nsISupports* aSubject, const char* aTopic,
const char16_t* aData) override
{
MOZ_CRASH("Forbidden method");
return NS_OK;
}
};
} // namespace mozilla

View file

@ -318,7 +318,7 @@ int AudioStream::InvokeCubeb(Function aFunction, Args&&... aArgs)
}
nsresult
AudioStream::Init(uint32_t aNumChannels, uint32_t aRate,
AudioStream::Init(uint32_t aNumChannels, uint32_t aChannelMap, uint32_t aRate,
const dom::AudioChannel aAudioChannel)
{
auto startTime = TimeStamp::Now();
@ -332,6 +332,7 @@ AudioStream::Init(uint32_t aNumChannels, uint32_t aRate,
cubeb_stream_params params;
params.rate = aRate;
params.channels = mOutChannels;
params.layout = CubebUtils::ConvertChannelMapToCubebLayout(aChannelMap);
#if defined(__ANDROID__)
#if defined(MOZ_B2G)
params.stream_type = CubebUtils::ConvertChannelToCubebType(aAudioChannel);
@ -354,10 +355,6 @@ AudioStream::Init(uint32_t aNumChannels, uint32_t aRate,
return NS_ERROR_DOM_MEDIA_CUBEB_INITIALIZATION_ERR;
}
// The DecodedAudioDataSink forces mono or stereo for now.
params.layout = params.channels == 1 ? CUBEB_LAYOUT_MONO
: CUBEB_LAYOUT_STEREO;
return OpenCubeb(cubebContext, params, startTime, CubebUtils::GetFirstStream());
}

View file

@ -188,9 +188,10 @@ public:
explicit AudioStream(DataSource& aSource);
// Initialize the audio stream. aNumChannels is the number of audio
// channels (1 for mono, 2 for stereo, etc) and aRate is the sample rate
// channels (1 for mono, 2 for stereo, etc), aChannelMap is the indicator for
// channel layout (mono, stereo, 5.1 or 7.1) and aRate is the sample rate
// (22050Hz, 44100Hz, etc).
nsresult Init(uint32_t aNumChannels, uint32_t aRate,
nsresult Init(uint32_t aNumChannels, uint32_t aChannelMap, uint32_t aRate,
const dom::AudioChannel aAudioStreamChannel);
// Closes the stream. All future use of the stream is an error.
@ -224,6 +225,11 @@ public:
return CubebUtils::PreferredSampleRate();
}
static uint32_t GetPreferredChannelMap(uint32_t aChannels)
{
return CubebUtils::PreferredChannelMap(aChannels);
}
uint32_t GetOutChannels() { return mOutChannels; }
// Set playback rate as a multiple of the intrinsic playback rate. This is to

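An inference from the signature change: callers of Init() are now expected to obtain the mask via AudioStream::GetPreferredChannelMap(channelCount) and pass it as aChannelMap, which Init() converts with CubebUtils::ConvertChannelMapToCubebLayout for cubeb's params.layout (the updated call sites themselves are not part of the hunks shown here).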
View file

@ -10,10 +10,12 @@
#include "MediaPrefs.h"
#include "PDMFactory.h"
#include "WebMDemuxer.h"
#include "gfxPrefs.h"
#include "mozilla/AbstractThread.h"
#include "mozilla/Preferences.h"
#include "mozilla/Telemetry.h"
#include "mozilla/dom/ContentChild.h"
#include "mozilla/gfx/gfxVars.h"
#ifndef MOZ_WIDGET_ANDROID
#include "WebMSample.h"
@ -136,7 +138,8 @@ void
Benchmark::Init()
{
MOZ_ASSERT(NS_IsMainThread());
gfxVars::Initialize();
gfxPrefs::GetSingleton();
MediaPrefs::GetSingleton();
}
@ -150,6 +153,7 @@ BenchmarkPlayback::BenchmarkPlayback(Benchmark* aMainThreadState,
, mSampleIndex(0)
, mFrameCount(0)
, mFinished(false)
, mDrained(false)
{
MOZ_ASSERT(static_cast<Benchmark*>(mMainThreadState)->OnThread());
}
@ -186,8 +190,8 @@ BenchmarkPlayback::DemuxNextSample()
Thread(), __func__,
[this, ref](RefPtr<MediaTrackDemuxer::SamplesHolder> aHolder) {
mSamples.AppendElements(Move(aHolder->mSamples));
if (ref->mParameters.mStopAtFrame &&
mSamples.Length() == (size_t)ref->mParameters.mStopAtFrame.ref()) {
if (ref->mParameters.mStopAtFrame
&& mSamples.Length() == (size_t)ref->mParameters.mStopAtFrame.ref()) {
InitDecoder(Move(*mTrackDemuxer->GetInfo()));
} else {
Dispatch(NS_NewRunnableFunction([this, ref]() { DemuxNextSample(); }));
@ -210,7 +214,7 @@ BenchmarkPlayback::InitDecoder(TrackInfo&& aInfo)
MOZ_ASSERT(OnThread());
RefPtr<PDMFactory> platform = new PDMFactory();
mDecoder = platform->CreateDecoder({ aInfo, mDecoderTaskQueue, reinterpret_cast<MediaDataDecoderCallback*>(this) });
mDecoder = platform->CreateDecoder({ aInfo, mDecoderTaskQueue });
if (!mDecoder) {
MainThreadShutdown();
return;
@ -221,7 +225,7 @@ BenchmarkPlayback::InitDecoder(TrackInfo&& aInfo)
[this, ref](TrackInfo::TrackType aTrackType) {
InputExhausted();
},
[this, ref](MediaResult aError) {
[this, ref](const MediaResult& aError) {
MainThreadShutdown();
});
}
@ -238,98 +242,88 @@ BenchmarkPlayback::MainThreadShutdown()
mFinished = true;
if (mDecoder) {
mDecoder->Flush();
mDecoder->Shutdown();
mDecoder = nullptr;
RefPtr<Benchmark> ref(mMainThreadState);
mDecoder->Flush()->Then(
Thread(), __func__,
[ref, this]() {
mDecoder->Shutdown()->Then(
Thread(), __func__,
[ref, this]() {
mDecoderTaskQueue->BeginShutdown();
mDecoderTaskQueue->AwaitShutdownAndIdle();
mDecoderTaskQueue = nullptr;
if (mTrackDemuxer) {
mTrackDemuxer->Reset();
mTrackDemuxer->BreakCycles();
mTrackDemuxer = nullptr;
}
Thread()->AsTaskQueue()->BeginShutdown()->Then(
ref->Thread(), __func__,
[ref]() { ref->Dispose(); },
[]() { MOZ_CRASH("not reached"); });
},
[]() { MOZ_CRASH("not reached"); });
mDecoder = nullptr;
},
[]() { MOZ_CRASH("not reached"); });
}
}
mDecoderTaskQueue->BeginShutdown();
mDecoderTaskQueue->AwaitShutdownAndIdle();
mDecoderTaskQueue = nullptr;
if (mTrackDemuxer) {
mTrackDemuxer->Reset();
mTrackDemuxer->BreakCycles();
mTrackDemuxer = nullptr;
void
BenchmarkPlayback::Output(const MediaDataDecoder::DecodedData& aResults)
{
MOZ_ASSERT(OnThread());
RefPtr<Benchmark> ref(mMainThreadState);
mFrameCount += aResults.Length();
if (!mDecodeStartTime && mFrameCount >= ref->mParameters.mStartupFrame) {
mDecodeStartTime = Some(TimeStamp::Now());
}
RefPtr<Benchmark> ref(mMainThreadState);
Thread()->AsTaskQueue()->BeginShutdown()->Then(
ref->Thread(), __func__,
[ref]() { ref->Dispose(); },
[]() { MOZ_CRASH("not reached"); });
}
void
BenchmarkPlayback::Output(MediaData* aData)
{
RefPtr<Benchmark> ref(mMainThreadState);
Dispatch(NS_NewRunnableFunction([this, ref]() {
mFrameCount++;
if (mFrameCount == ref->mParameters.mStartupFrame) {
mDecodeStartTime = TimeStamp::Now();
}
int32_t frames = mFrameCount - ref->mParameters.mStartupFrame;
TimeDuration elapsedTime = TimeStamp::Now() - mDecodeStartTime;
if (!mFinished &&
(frames == ref->mParameters.mFramesToMeasure ||
elapsedTime >= ref->mParameters.mTimeout)) {
uint32_t decodeFps = frames / elapsedTime.ToSeconds();
MainThreadShutdown();
ref->Dispatch(NS_NewRunnableFunction([ref, decodeFps]() {
ref->ReturnResult(decodeFps);
}));
}
}));
}
void
BenchmarkPlayback::Error(const MediaResult& aError)
{
RefPtr<Benchmark> ref(mMainThreadState);
Dispatch(NS_NewRunnableFunction([this, ref]() { MainThreadShutdown(); }));
}
void
BenchmarkPlayback::InputExhausted()
{
RefPtr<Benchmark> ref(mMainThreadState);
Dispatch(NS_NewRunnableFunction([this, ref]() {
MOZ_ASSERT(OnThread());
if (mFinished || mSampleIndex >= mSamples.Length()) {
return;
}
mDecoder->Input(mSamples[mSampleIndex]);
mSampleIndex++;
if (mSampleIndex == mSamples.Length()) {
if (ref->mParameters.mStopAtFrame) {
mSampleIndex = 0;
} else {
mDecoder->Drain();
}
}
}));
}
void
BenchmarkPlayback::DrainComplete()
{
RefPtr<Benchmark> ref(mMainThreadState);
Dispatch(NS_NewRunnableFunction([this, ref]() {
int32_t frames = mFrameCount - ref->mParameters.mStartupFrame;
TimeDuration elapsedTime = TimeStamp::Now() - mDecodeStartTime;
TimeStamp now = TimeStamp::Now();
int32_t frames = mFrameCount - ref->mParameters.mStartupFrame;
TimeDuration elapsedTime = now - mDecodeStartTime.refOr(now);
if (!mFinished
&& (((frames == ref->mParameters.mFramesToMeasure) && frames > 0)
|| elapsedTime >= ref->mParameters.mTimeout
|| mDrained)) {
uint32_t decodeFps = frames / elapsedTime.ToSeconds();
MainThreadShutdown();
ref->Dispatch(NS_NewRunnableFunction([ref, decodeFps]() {
ref->ReturnResult(decodeFps);
}));
}));
}
}
bool
BenchmarkPlayback::OnReaderTaskQueue()
void
BenchmarkPlayback::InputExhausted()
{
return OnThread();
MOZ_ASSERT(OnThread());
if (mFinished || mSampleIndex >= mSamples.Length()) {
return;
}
RefPtr<Benchmark> ref(mMainThreadState);
mDecoder->Decode(mSamples[mSampleIndex])
->Then(Thread(), __func__,
[ref, this](const MediaDataDecoder::DecodedData& aResults) {
Output(aResults);
InputExhausted();
},
[ref, this](const MediaResult& aError) { MainThreadShutdown(); });
mSampleIndex++;
if (mSampleIndex == mSamples.Length()) {
if (ref->mParameters.mStopAtFrame) {
mSampleIndex = 0;
} else {
mDecoder->Drain()->Then(
Thread(), __func__,
[ref, this](const MediaDataDecoder::DecodedData& aResults) {
mDrained = true;
Output(aResults);
},
[ref, this](const MediaResult& aError) { MainThreadShutdown(); });
}
}
}
}
} // namespace mozilla

View file

@ -10,6 +10,7 @@
#include "MediaDataDemuxer.h"
#include "QueueObject.h"
#include "PlatformDecoderModule.h"
#include "mozilla/Maybe.h"
#include "mozilla/RefPtr.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/TimeStamp.h"
@ -20,22 +21,17 @@ namespace mozilla {
class TaskQueue;
class Benchmark;
class BenchmarkPlayback : public QueueObject, private MediaDataDecoderCallback
class BenchmarkPlayback : public QueueObject
{
friend class Benchmark;
explicit BenchmarkPlayback(Benchmark* aMainThreadState, MediaDataDemuxer* aDemuxer);
BenchmarkPlayback(Benchmark* aMainThreadState, MediaDataDemuxer* aDemuxer);
void DemuxSamples();
void DemuxNextSample();
void MainThreadShutdown();
void InitDecoder(TrackInfo&& aInfo);
// MediaDataDecoderCallback
// Those methods are called on the MediaDataDecoder's task queue.
void Output(MediaData* aData) override;
void Error(const MediaResult& aError) override;
void InputExhausted() override;
void DrainComplete() override;
bool OnReaderTaskQueue() override;
void Output(const MediaDataDecoder::DecodedData& aResults);
void InputExhausted();
Atomic<Benchmark*> mMainThreadState;
@ -47,9 +43,10 @@ class BenchmarkPlayback : public QueueObject, private MediaDataDecoderCallback
RefPtr<MediaTrackDemuxer> mTrackDemuxer;
nsTArray<RefPtr<MediaRawData>> mSamples;
size_t mSampleIndex;
TimeStamp mDecodeStartTime;
Maybe<TimeStamp> mDecodeStartTime;
uint32_t mFrameCount;
bool mFinished;
bool mDrained;
};
// Init() must have been called at least once prior on the
@ -64,7 +61,9 @@ public:
Parameters()
: mFramesToMeasure(-1)
, mStartupFrame(1)
, mTimeout(TimeDuration::Forever()) {}
, mTimeout(TimeDuration::Forever())
{
}
Parameters(int32_t aFramesToMeasure,
uint32_t aStartupFrame,
@ -73,7 +72,9 @@ public:
: mFramesToMeasure(aFramesToMeasure)
, mStartupFrame(aStartupFrame)
, mStopAtFrame(Some(aStopAtFrame))
, mTimeout(aTimeout) {}
, mTimeout(aTimeout)
{
}
const int32_t mFramesToMeasure;
const uint32_t mStartupFrame;
@ -83,7 +84,8 @@ public:
typedef MozPromise<uint32_t, bool, /* IsExclusive = */ true> BenchmarkPromise;
explicit Benchmark(MediaDataDemuxer* aDemuxer, const Parameters& aParameters = Parameters());
explicit Benchmark(MediaDataDemuxer* aDemuxer,
const Parameters& aParameters = Parameters());
RefPtr<BenchmarkPromise> Run();
static void Init();

View file

@ -24,6 +24,23 @@
#define PREF_CUBEB_LATENCY_MSG "media.cubeb_latency_msg_frames"
#define PREF_CUBEB_LOG_LEVEL "media.cubeb.log_level"
#define MASK_MONO (1 << AudioConfig::CHANNEL_MONO)
#define MASK_MONO_LFE (MASK_MONO | (1 << AudioConfig::CHANNEL_LFE))
#define MASK_STEREO ((1 << AudioConfig::CHANNEL_LEFT) | (1 << AudioConfig::CHANNEL_RIGHT))
#define MASK_STEREO_LFE (MASK_STEREO | (1 << AudioConfig::CHANNEL_LFE))
#define MASK_3F (MASK_STEREO | (1 << AudioConfig::CHANNEL_CENTER))
#define MASK_3F_LFE (MASK_3F | (1 << AudioConfig::CHANNEL_LFE))
#define MASK_2F1 (MASK_STEREO | (1 << AudioConfig::CHANNEL_RCENTER))
#define MASK_2F1_LFE (MASK_2F1 | (1 << AudioConfig::CHANNEL_LFE))
#define MASK_3F1 (MASK_3F | (1 << AudioConfig::CHANNEL_RCENTER))
#define MASK_3F1_LFE (MASK_3F1 | (1 << AudioConfig::CHANNEL_LFE))
#define MASK_2F2 (MASK_STEREO | (1 << AudioConfig::CHANNEL_LS) | (1 << AudioConfig::CHANNEL_RS))
#define MASK_2F2_LFE (MASK_2F2 | (1 << AudioConfig::CHANNEL_LFE))
#define MASK_3F2 (MASK_3F | (1 << AudioConfig::CHANNEL_LS) | (1 << AudioConfig::CHANNEL_RS))
#define MASK_3F2_LFE (MASK_3F2 | (1 << AudioConfig::CHANNEL_LFE))
#define MASK_3F3R_LFE (MASK_3F2_LFE | (1 << AudioConfig::CHANNEL_RCENTER))
#define MASK_3F4_LFE (MASK_3F2_LFE | (1 << AudioConfig::CHANNEL_RLS) | (1 << AudioConfig::CHANNEL_RRS))
namespace mozilla {
namespace {
@ -94,6 +111,11 @@ const int CUBEB_BACKEND_UNKNOWN = CUBEB_BACKEND_INIT_FAILURE_FIRST + 2;
// visible on the querying thread/CPU.
uint32_t sPreferredSampleRate;
// We only support SMPTE layout in cubeb for now. If the value is
// CUBEB_LAYOUT_UNDEFINED, then it implies that the preferred layout is
// a non-SMPTE format.
cubeb_channel_layout sPreferredChannelLayout;
} // namespace
extern LazyLogModule gAudioStreamLog;
@ -199,6 +221,61 @@ uint32_t PreferredSampleRate()
return sPreferredSampleRate;
}
bool InitPreferredChannelLayout()
{
StaticMutexAutoLock lock(sMutex);
if (sPreferredChannelLayout != 0) {
return true;
}
cubeb* context = GetCubebContextUnlocked();
if (!context) {
return false;
}
return cubeb_get_preferred_channel_layout(context,
&sPreferredChannelLayout) == CUBEB_OK;
}
uint32_t PreferredChannelMap(uint32_t aChannels)
{
// The first element of each entry in the following mapping table is the
// channel count, and the second is its bit mask. The table is consulted
// often, so we should avoid allocating it on the stack, where it would be
// created and destroyed repeatedly. Use static to place this local
// variable in the data segment instead of on the stack.
static uint32_t layoutInfo[CUBEB_LAYOUT_MAX][2] = {
{ 0, 0 }, // CUBEB_LAYOUT_UNDEFINED
{ 2, MASK_STEREO }, // CUBEB_LAYOUT_DUAL_MONO
{ 3, MASK_STEREO_LFE }, // CUBEB_LAYOUT_DUAL_MONO_LFE
{ 1, MASK_MONO }, // CUBEB_LAYOUT_MONO
{ 2, MASK_MONO_LFE }, // CUBEB_LAYOUT_MONO_LFE
{ 2, MASK_STEREO }, // CUBEB_LAYOUT_STEREO
{ 3, MASK_STEREO_LFE }, // CUBEB_LAYOUT_STEREO_LFE
{ 3, MASK_3F }, // CUBEB_LAYOUT_3F
{ 4, MASK_3F_LFE }, // CUBEB_LAYOUT_3F_LFE
{ 3, MASK_2F1 }, // CUBEB_LAYOUT_2F1
{ 4, MASK_2F1_LFE }, // CUBEB_LAYOUT_2F1_LFE
{ 4, MASK_3F1 }, // CUBEB_LAYOUT_3F1
{ 5, MASK_3F1_LFE }, // CUBEB_LAYOUT_3F1_LFE
{ 4, MASK_2F2 }, // CUBEB_LAYOUT_2F2
{ 5, MASK_2F2_LFE }, // CUBEB_LAYOUT_2F2_LFE
{ 5, MASK_3F2 }, // CUBEB_LAYOUT_3F2
{ 6, MASK_3F2_LFE }, // CUBEB_LAYOUT_3F2_LFE
{ 7, MASK_3F3R_LFE }, // CUBEB_LAYOUT_3F3R_LFE
{ 8, MASK_3F4_LFE }, // CUBEB_LAYOUT_3F4_LFE
};
// Use the default SMPTE channel map if we can't get the preferred layout,
// or if the channel count of the preferred layout differs from the input's.
if (!InitPreferredChannelLayout()
|| layoutInfo[sPreferredChannelLayout][0] != aChannels) {
AudioConfig::ChannelLayout smpteLayout(aChannels);
return smpteLayout.Map();
}
return layoutInfo[sPreferredChannelLayout][1];
}
void InitBrandName()
{
if (sBrandName) {
@ -359,6 +436,31 @@ uint32_t MaxNumberOfChannels()
return 0;
}
cubeb_channel_layout ConvertChannelMapToCubebLayout(uint32_t aChannelMap)
{
switch (aChannelMap) {
case MASK_MONO: return CUBEB_LAYOUT_MONO;
case MASK_MONO_LFE: return CUBEB_LAYOUT_MONO_LFE;
case MASK_STEREO: return CUBEB_LAYOUT_STEREO;
case MASK_STEREO_LFE: return CUBEB_LAYOUT_STEREO_LFE;
case MASK_3F: return CUBEB_LAYOUT_3F;
case MASK_3F_LFE: return CUBEB_LAYOUT_3F_LFE;
case MASK_2F1: return CUBEB_LAYOUT_2F1;
case MASK_2F1_LFE: return CUBEB_LAYOUT_2F1_LFE;
case MASK_3F1: return CUBEB_LAYOUT_3F1;
case MASK_3F1_LFE: return CUBEB_LAYOUT_3F1_LFE;
case MASK_2F2: return CUBEB_LAYOUT_2F2;
case MASK_2F2_LFE: return CUBEB_LAYOUT_2F2_LFE;
case MASK_3F2: return CUBEB_LAYOUT_3F2;
case MASK_3F2_LFE: return CUBEB_LAYOUT_3F2_LFE;
case MASK_3F3R_LFE: return CUBEB_LAYOUT_3F3R_LFE;
case MASK_3F4_LFE: return CUBEB_LAYOUT_3F4_LFE;
default:
NS_ERROR("The channel map is unsupported");
return CUBEB_LAYOUT_UNDEFINED;
}
}
#if defined(__ANDROID__) && defined(MOZ_B2G)
cubeb_stream_type ConvertChannelToCubebType(dom::AudioChannel aChannel)
{

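A worked round trip through the masks above: with stereo input and a device whose preferred layout is CUBEB_LAYOUT_STEREO, PreferredChannelMap(2) finds a matching channel count in layoutInfo and returns MASK_STEREO, i.e. (1 << AudioConfig::CHANNEL_LEFT) | (1 << AudioConfig::CHANNEL_RIGHT); ConvertChannelMapToCubebLayout(MASK_STEREO) then maps that mask back to CUBEB_LAYOUT_STEREO. If the preferred layout's channel count does not match the input (say a 5.1 device fed stereo), PreferredChannelMap falls back to the SMPTE default layout for that channel count instead.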
View file

@ -30,6 +30,9 @@ uint32_t MaxNumberOfChannels();
// Get the sample rate the hardware/mixer runs at. Thread safe.
uint32_t PreferredSampleRate();
// Get the bit mask of the connected audio device's preferred layout.
uint32_t PreferredChannelMap(uint32_t aChannels);
void PrefChanged(const char* aPref, void* aClosure);
double GetVolumeScale();
bool GetFirstStream();
@ -40,6 +43,7 @@ void ReportCubebBackendUsed();
uint32_t GetCubebPlaybackLatencyInMilliseconds();
Maybe<uint32_t> GetCubebMSGLatencyInFrames();
bool CubebLatencyPrefSet();
cubeb_channel_layout ConvertChannelMapToCubebLayout(uint32_t aChannelMap);
#if defined(__ANDROID__) && defined(MOZ_B2G)
cubeb_stream_type ConvertChannelToCubebType(dom::AudioChannel aChannel);
#endif

View file

@ -724,7 +724,7 @@ DOMMediaStream::CloneInternal(TrackForwardingOption aForwarding)
LOG(LogLevel::Info, ("DOMMediaStream %p created clone %p, forwarding %s tracks",
this, newStream.get(),
aForwarding == TrackForwardingOption::ALL
? "all" : "current"));
? "all" : "current"));
MOZ_RELEASE_ASSERT(mPlaybackStream);
MOZ_RELEASE_ASSERT(mPlaybackStream->Graph());

View file

@ -67,8 +67,8 @@ void GraphDriver::SetGraphTime(GraphDriver* aPreviousDriver,
STREAM_LOG(LogLevel::Debug, ("Setting previous driver: %p (%s)",
aPreviousDriver,
aPreviousDriver->AsAudioCallbackDriver()
? "AudioCallbackDriver"
: "SystemClockDriver"));
? "AudioCallbackDriver"
: "SystemClockDriver"));
SetPreviousDriver(aPreviousDriver);
}

View file

@ -1526,8 +1526,8 @@ MediaCache::AllocateAndWriteBlock(MediaCacheStream* aStream, const void* aData,
bo->mLastUseTime = now;
stream->mBlocks[streamBlockIndex] = blockIndex;
if (streamBlockIndex*BLOCK_SIZE < stream->mStreamOffset) {
bo->mClass = aMode == MediaCacheStream::MODE_PLAYBACK
? PLAYED_BLOCK : METADATA_BLOCK;
bo->mClass = aMode == MediaCacheStream::MODE_PLAYBACK ? PLAYED_BLOCK
: METADATA_BLOCK;
// This must be the most-recently-used block, since we
// marked it as used now (which may be slightly bogus, but we'll
// treat it as used for simplicity).
@ -1648,7 +1648,8 @@ MediaCache::NoteBlockUsage(MediaCacheStream* aStream, int32_t aBlockIndex,
GetListForBlock(bo)->RemoveBlock(aBlockIndex);
bo->mClass =
(aMode == MediaCacheStream::MODE_METADATA || bo->mClass == METADATA_BLOCK)
? METADATA_BLOCK : PLAYED_BLOCK;
? METADATA_BLOCK
: PLAYED_BLOCK;
// Since this is just being used now, it can definitely be at the front
// of mMetadataBlocks or mPlayedBlocks
GetListForBlock(bo)->AddFirstBlock(aBlockIndex);

View file

@ -270,12 +270,14 @@ typedef AlignedBuffer<int16_t> AlignedShortBuffer;
typedef AlignedBuffer<AudioDataValue> AlignedAudioBuffer;
// Container that holds media samples.
class MediaData {
class MediaData
{
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaData)
enum Type {
enum Type
{
AUDIO_DATA = 0,
VIDEO_DATA,
RAW_DATA,
@ -294,7 +296,8 @@ public:
, mDuration(aDuration)
, mFrames(aFrames)
, mKeyframe(false)
{}
{
}
// Type of contained data.
const Type mType;
@ -348,7 +351,8 @@ protected:
, mDuration(0)
, mFrames(aFrames)
, mKeyframe(false)
{}
{
}
virtual ~MediaData() {}
@ -356,7 +360,8 @@ protected:
// NullData is for decoder generating a sample which doesn't need to be
// rendered.
class NullData : public MediaData {
class NullData : public MediaData
{
public:
NullData(int64_t aOffset, int64_t aTime, int64_t aDuration)
: MediaData(NULL_DATA, aOffset, aTime, aDuration, 0)
@ -366,7 +371,8 @@ public:
};
// Holds a chunk of decoded audio frames.
class AudioData : public MediaData {
class AudioData : public MediaData
{
public:
AudioData(int64_t aOffset,
@ -411,7 +417,7 @@ public:
AlignedAudioBuffer mAudioData;
protected:
~AudioData() {}
~AudioData() { }
};
namespace layers {
@ -422,7 +428,8 @@ class PlanarYCbCrImage;
class VideoInfo;
// Holds a decoded video frame, in YCbCr format. These are queued in the reader.
class VideoData : public MediaData {
class VideoData : public MediaData
{
public:
typedef gfx::IntRect IntRect;
typedef gfx::IntSize IntSize;
@ -437,8 +444,10 @@ public:
// 0 = Y
// 1 = Cb
// 2 = Cr
struct YCbCrBuffer {
struct Plane {
struct YCbCrBuffer
{
struct Plane
{
uint8_t* mData;
uint32_t mWidth;
uint32_t mHeight;
@ -451,7 +460,8 @@ public:
YUVColorSpace mYUVColorSpace = YUVColorSpace::BT601;
};
class Listener {
class Listener
{
public:
virtual void OnSentToCompositor() = 0;
virtual ~Listener() {}
@ -469,44 +479,48 @@ public:
// Creates a new VideoData containing a deep copy of aBuffer. May use aContainer
// to allocate an Image to hold the copied data.
static already_AddRefed<VideoData> CreateAndCopyData(const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
const YCbCrBuffer &aBuffer,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyData(
const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
const YCbCrBuffer &aBuffer,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyData(const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
const YCbCrBuffer &aBuffer,
const YCbCrBuffer::Plane &aAlphaPlane,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyData(
const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
const YCbCrBuffer &aBuffer,
const YCbCrBuffer::Plane &aAlphaPlane,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyIntoTextureClient(const VideoInfo& aInfo,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
layers::TextureClient* aBuffer,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyIntoTextureClient(
const VideoInfo& aInfo,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
layers::TextureClient* aBuffer,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateFromImage(const VideoInfo& aInfo,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
const RefPtr<Image>& aImage,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateFromImage(
const VideoInfo& aInfo,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
const RefPtr<Image>& aImage,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
// Initialize PlanarYCbCrImage. Only When aCopyData is true,
// video data is copied to PlanarYCbCrImage.
@ -553,7 +567,7 @@ protected:
class CryptoTrack
{
public:
CryptoTrack() : mValid(false), mMode(0), mIVSize(0) {}
CryptoTrack() : mValid(false), mMode(0), mIVSize(0) { }
bool mValid;
int32_t mMode;
int32_t mIVSize;
@ -620,7 +634,8 @@ private:
MediaRawData* mTarget;
};
class MediaRawData : public MediaData {
class MediaRawData : public MediaData
{
public:
MediaRawData();
MediaRawData(const uint8_t* aData, size_t aSize);
@ -676,10 +691,10 @@ private:
class MediaByteBuffer : public nsTArray<uint8_t> {
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaByteBuffer);
MediaByteBuffer() = default;
explicit MediaByteBuffer(size_t aCapacity) : nsTArray<uint8_t>(aCapacity) {}
explicit MediaByteBuffer(size_t aCapacity) : nsTArray<uint8_t>(aCapacity) { }
private:
~MediaByteBuffer() {}
~MediaByteBuffer() { }
};
} // namespace mozilla

View file

@ -55,8 +55,8 @@ public:
// aTrackNumber must be constrained between 0 and GetNumberTracks(aType) - 1
// The actual Track ID is to be retrieved by calling
// MediaTrackDemuxer::TrackInfo.
virtual already_AddRefed<MediaTrackDemuxer> GetTrackDemuxer(TrackInfo::TrackType aType,
uint32_t aTrackNumber) = 0;
virtual already_AddRefed<MediaTrackDemuxer> GetTrackDemuxer(
TrackInfo::TrackType aType, uint32_t aTrackNumber) = 0;
// Returns true if the underlying resource allows seeking.
virtual bool IsSeekable() const = 0;
@ -101,15 +101,17 @@ class MediaTrackDemuxer
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaTrackDemuxer)
class SamplesHolder {
class SamplesHolder
{
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SamplesHolder)
nsTArray<RefPtr<MediaRawData>> mSamples;
private:
~SamplesHolder() {}
~SamplesHolder() { }
};
class SkipFailureHolder {
class SkipFailureHolder
{
public:
SkipFailureHolder(const MediaResult& aFailure, uint32_t aSkipped)
: mFailure(aFailure)
@ -119,9 +121,13 @@ public:
uint32_t mSkipped;
};
typedef MozPromise<media::TimeUnit, MediaResult, /* IsExclusive = */ true> SeekPromise;
typedef MozPromise<RefPtr<SamplesHolder>, MediaResult, /* IsExclusive = */ true> SamplesPromise;
typedef MozPromise<uint32_t, SkipFailureHolder, /* IsExclusive = */ true> SkipAccessPointPromise;
typedef MozPromise<media::TimeUnit, MediaResult, /* IsExclusive = */ true>
SeekPromise;
typedef MozPromise<RefPtr<SamplesHolder>, MediaResult,
/* IsExclusive = */ true>
SamplesPromise;
typedef MozPromise<uint32_t, SkipFailureHolder, /* IsExclusive = */ true>
SkipAccessPointPromise;
// Returns the TrackInfo (a.k.a Track Description) for this track.
// The TrackInfo returned will be:
@ -207,7 +213,7 @@ public:
}
protected:
virtual ~MediaTrackDemuxer() {}
virtual ~MediaTrackDemuxer() { }
};
} // namespace mozilla

View file

@ -92,7 +92,8 @@ class MediaMemoryTracker : public nsIMemoryReporter
static StaticRefPtr<MediaMemoryTracker> sUniqueInstance;
static MediaMemoryTracker* UniqueInstance() {
static MediaMemoryTracker* UniqueInstance()
{
if (!sUniqueInstance) {
sUniqueInstance = new MediaMemoryTracker();
sUniqueInstance->InitMemoryReporter();
@ -101,7 +102,8 @@ class MediaMemoryTracker : public nsIMemoryReporter
}
typedef nsTArray<MediaDecoder*> DecodersArray;
static DecodersArray& Decoders() {
static DecodersArray& Decoders()
{
return UniqueInstance()->mDecoders;
}
@ -441,7 +443,8 @@ MediaDecoder::MediaDecoder(MediaDecoderOwner* aOwner)
mWatchManager.Watch(mStateMachineDuration, &MediaDecoder::DurationChanged);
// mStateMachineIsShutdown
mWatchManager.Watch(mStateMachineIsShutdown, &MediaDecoder::ShutdownBitChanged);
mWatchManager.Watch(mStateMachineIsShutdown,
&MediaDecoder::ShutdownBitChanged);
// readyState
mWatchManager.Watch(mPlayState, &MediaDecoder::UpdateReadyState);
@ -458,7 +461,8 @@ MediaDecoder::MediaDecoder(MediaDecoderOwner* aOwner)
// mIgnoreProgressData
mWatchManager.Watch(mLogicallySeeking, &MediaDecoder::SeekingChanged);
mWatchManager.Watch(mIsAudioDataAudible, &MediaDecoder::NotifyAudibleStateChanged);
mWatchManager.Watch(mIsAudioDataAudible,
&MediaDecoder::NotifyAudibleStateChanged);
MediaShutdownManager::Instance().Register(this);
}
@ -648,8 +652,9 @@ MediaDecoder::SetStateMachineParameters()
mAbstractMainThread, this, &MediaDecoder::OnMetadataUpdate);
mMetadataLoadedListener = mDecoderStateMachine->MetadataLoadedEvent().Connect(
mAbstractMainThread, this, &MediaDecoder::MetadataLoaded);
mFirstFrameLoadedListener = mDecoderStateMachine->FirstFrameLoadedEvent().Connect(
mAbstractMainThread, this, &MediaDecoder::FirstFrameLoaded);
mFirstFrameLoadedListener =
mDecoderStateMachine->FirstFrameLoadedEvent().Connect(
mAbstractMainThread, this, &MediaDecoder::FirstFrameLoaded);
mOnPlaybackEvent = mDecoderStateMachine->OnPlaybackEvent().Connect(
mAbstractMainThread, this, &MediaDecoder::OnPlaybackEvent);
@ -695,7 +700,8 @@ MediaDecoder::Play()
}
nsresult
MediaDecoder::Seek(double aTime, SeekTarget::Type aSeekType, dom::Promise* aPromise /*=nullptr*/)
MediaDecoder::Seek(double aTime, SeekTarget::Type aSeekType,
dom::Promise* aPromise /*=nullptr*/)
{
MOZ_ASSERT(NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
@ -813,7 +819,8 @@ MediaDecoder::MetadataLoaded(nsAutoPtr<MediaInfo> aInfo,
// our new size.
if (aEventVisibility != MediaDecoderEventVisibility::Suppressed) {
mFiredMetadataLoaded = true;
GetOwner()->MetadataLoaded(mInfo, nsAutoPtr<const MetadataTags>(aTags.forget()));
GetOwner()->MetadataLoaded(mInfo,
nsAutoPtr<const MetadataTags>(aTags.forget()));
}
// Invalidate() will end up calling GetOwner()->UpdateMediaSize with the last
// dimensions retrieved from the video frame container. The video frame
@ -838,15 +845,17 @@ MediaDecoder::EnsureTelemetryReported()
}
nsTArray<nsCString> codecs;
if (mInfo->HasAudio() && !mInfo->mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) {
if (mInfo->HasAudio()
&& !mInfo->mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) {
codecs.AppendElement(mInfo->mAudio.GetAsAudioInfo()->mMimeType);
}
if (mInfo->HasVideo() && !mInfo->mVideo.GetAsVideoInfo()->mMimeType.IsEmpty()) {
if (mInfo->HasVideo()
&& !mInfo->mVideo.GetAsVideoInfo()->mMimeType.IsEmpty()) {
codecs.AppendElement(mInfo->mVideo.GetAsVideoInfo()->mMimeType);
}
if (codecs.IsEmpty()) {
codecs.AppendElement(nsPrintfCString("resource; %s",
mResource->GetContentType().OriginalString().Data()));
codecs.AppendElement(nsPrintfCString(
"resource; %s", mResource->GetContentType().OriginalString().Data()));
}
for (const nsCString& codec : codecs) {
DECODER_LOG("Telemetry MEDIA_CODEC_USED= '%s'", codec.get());
@ -870,9 +879,10 @@ MediaDecoder::FirstFrameLoaded(nsAutoPtr<MediaInfo> aInfo,
MOZ_ASSERT(NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
DECODER_LOG("FirstFrameLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d mPlayState=%s",
aInfo->mAudio.mChannels, aInfo->mAudio.mRate,
aInfo->HasAudio(), aInfo->HasVideo(), PlayStateStr());
DECODER_LOG("FirstFrameLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d "
"mPlayState=%s",
aInfo->mAudio.mChannels, aInfo->mAudio.mRate, aInfo->HasAudio(),
aInfo->HasVideo(), PlayStateStr());
mInfo = aInfo.forget();
@ -962,8 +972,10 @@ already_AddRefed<GMPCrashHelper>
MediaDecoder::GetCrashHelper()
{
MOZ_ASSERT(NS_IsMainThread());
return GetOwner()->GetMediaElement() ?
MakeAndAddRef<MediaElementGMPCrashHelper>(GetOwner()->GetMediaElement()) : nullptr;
return GetOwner()->GetMediaElement()
? MakeAndAddRef<MediaElementGMPCrashHelper>(
GetOwner()->GetMediaElement())
: nullptr;
}
bool
@ -999,8 +1011,8 @@ MediaDecoder::PlaybackEnded()
InvalidateWithFlags(VideoFrameContainer::INVALIDATE_FORCE);
GetOwner()->PlaybackEnded();
// This must be called after |GetOwner()->PlaybackEnded()| call above, in order
// to fire the required durationchange.
// This must be called after |GetOwner()->PlaybackEnded()| call above, in
// order to fire the required durationchange.
if (IsInfinite()) {
SetInfinite(false);
}
@ -1013,7 +1025,8 @@ MediaDecoder::GetStatistics()
MOZ_ASSERT(mResource);
MediaStatistics result;
result.mDownloadRate = mResource->GetDownloadRate(&result.mDownloadRateReliable);
result.mDownloadRate =
mResource->GetDownloadRate(&result.mDownloadRateReliable);
result.mDownloadPosition = mResource->GetCachedDataEnd(mDecoderPosition);
result.mTotalBytes = mResource->GetLength();
result.mPlaybackRate = mPlaybackBytesPerSecond;
@ -1030,7 +1043,8 @@ MediaDecoder::ComputePlaybackRate()
MOZ_ASSERT(mResource);
int64_t length = mResource->GetLength();
if (!IsNaN(mDuration) && !mozilla::IsInfinite<double>(mDuration) && length >= 0) {
if (!IsNaN(mDuration) && !mozilla::IsInfinite<double>(mDuration)
&& length >= 0) {
mPlaybackRateReliable = true;
mPlaybackBytesPerSecond = length / mDuration;
return;
@ -1199,7 +1213,8 @@ MediaDecoder::UpdateLogicalPositionInternal()
MOZ_ASSERT(NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
double currentPosition = static_cast<double>(CurrentPosition()) / static_cast<double>(USECS_PER_S);
double currentPosition =
static_cast<double>(CurrentPosition()) / static_cast<double>(USECS_PER_S);
if (mPlayState == PLAY_STATE_ENDED) {
currentPosition = std::max(currentPosition, mDuration);
}
@ -1243,8 +1258,9 @@ MediaDecoder::DurationChanged()
// See https://www.w3.org/Bugs/Public/show_bug.cgi?id=28822 for a discussion
// of whether we should fire durationchange on explicit infinity.
if (mFiredMetadataLoaded &&
(!mozilla::IsInfinite<double>(mDuration) || mExplicitDuration.Ref().isSome())) {
if (mFiredMetadataLoaded
&& (!mozilla::IsInfinite<double>(mDuration)
|| mExplicitDuration.Ref().isSome())) {
GetOwner()->DispatchAsyncEvent(NS_LITERAL_STRING("durationchange"));
}
@ -1282,8 +1298,10 @@ MediaDecoder::UpdateEstimatedMediaDuration(int64_t aDuration)
// the current estimate, as the incoming duration is an estimate and so
// often is unstable as more data is read and the estimate is updated.
// Can result in a durationchangeevent. aDuration is in microseconds.
if (mEstimatedDuration.Ref().isSome() &&
mozilla::Abs(mEstimatedDuration.Ref().ref().ToMicroseconds() - aDuration) < ESTIMATED_DURATION_FUZZ_FACTOR_USECS) {
if (mEstimatedDuration.Ref().isSome()
&& mozilla::Abs(mEstimatedDuration.Ref().ref().ToMicroseconds()
- aDuration)
< ESTIMATED_DURATION_FUZZ_FACTOR_USECS) {
return;
}
@ -1327,9 +1345,9 @@ MediaDecoder::GetSeekable()
} else {
return media::TimeIntervals(
media::TimeInterval(media::TimeUnit::FromMicroseconds(0),
IsInfinite() ?
media::TimeUnit::FromInfinity() :
media::TimeUnit::FromSeconds(GetDuration())));
IsInfinite()
? media::TimeUnit::FromInfinity()
: media::TimeUnit::FromSeconds(GetDuration())));
}
}
@ -1338,7 +1356,8 @@ MediaDecoder::SetFragmentEndTime(double aTime)
{
MOZ_ASSERT(NS_IsMainThread());
if (mDecoderStateMachine) {
mDecoderStateMachine->DispatchSetFragmentEndTime(static_cast<int64_t>(aTime * USECS_PER_S));
mDecoderStateMachine->DispatchSetFragmentEndTime(
static_cast<int64_t>(aTime * USECS_PER_S));
}
}
@ -1443,7 +1462,8 @@ MediaDecoder::SetStateMachine(MediaDecoderStateMachine* aStateMachine)
ImageContainer*
MediaDecoder::GetImageContainer()
{
return mVideoFrameContainer ? mVideoFrameContainer->GetImageContainer() : nullptr;
return mVideoFrameContainer ? mVideoFrameContainer->GetImageContainer()
: nullptr;
}
void
@ -1465,13 +1485,15 @@ MediaDecoder::Invalidate()
// Constructs the time ranges representing what segments of the media
// are buffered and playable.
media::TimeIntervals
MediaDecoder::GetBuffered() {
MediaDecoder::GetBuffered()
{
MOZ_ASSERT(NS_IsMainThread());
return mBuffered.Ref();
}
size_t
MediaDecoder::SizeOfVideoQueue() {
MediaDecoder::SizeOfVideoQueue()
{
MOZ_ASSERT(NS_IsMainThread());
if (mDecoderStateMachine) {
return mDecoderStateMachine->SizeOfVideoQueue();
@ -1480,7 +1502,8 @@ MediaDecoder::SizeOfVideoQueue() {
}
size_t
MediaDecoder::SizeOfAudioQueue() {
MediaDecoder::SizeOfAudioQueue()
{
MOZ_ASSERT(NS_IsMainThread());
if (mDecoderStateMachine) {
return mDecoderStateMachine->SizeOfAudioQueue();
@ -1488,15 +1511,18 @@ MediaDecoder::SizeOfAudioQueue() {
return 0;
}
void MediaDecoder::AddSizeOfResources(ResourceSizes* aSizes) {
void MediaDecoder::AddSizeOfResources(ResourceSizes* aSizes)
{
MOZ_ASSERT(NS_IsMainThread());
if (GetResource()) {
aSizes->mByteSize += GetResource()->SizeOfIncludingThis(aSizes->mMallocSizeOf);
aSizes->mByteSize +=
GetResource()->SizeOfIncludingThis(aSizes->mMallocSizeOf);
}
}
void
MediaDecoder::NotifyDataArrived() {
MediaDecoder::NotifyDataArrived()
{
MOZ_ASSERT(NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
mDataArrivedEvent.Notify();
@ -1504,7 +1530,8 @@ MediaDecoder::NotifyDataArrived() {
// Provide access to the state machine object
MediaDecoderStateMachine*
MediaDecoder::GetStateMachine() const {
MediaDecoder::GetStateMachine() const
{
MOZ_ASSERT(NS_IsMainThread());
return mDecoderStateMachine;
}
@ -1592,9 +1619,9 @@ MediaDecoder::IsWebMEnabled()
bool
MediaDecoder::IsAndroidMediaPluginEnabled()
{
return AndroidBridge::Bridge() &&
AndroidBridge::Bridge()->GetAPIVersion() < 16 &&
Preferences::GetBool("media.plugins.enabled");
return AndroidBridge::Bridge()
&& AndroidBridge::Bridge()->GetAPIVersion() < 16
&& Preferences::GetBool("media.plugins.enabled");
}
#endif
@ -1602,8 +1629,6 @@ NS_IMETHODIMP
MediaMemoryTracker::CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize)
{
int64_t video = 0, audio = 0;
// NB: When resourceSizes' ref count goes to 0 the promise will report the
// resources memory and finish the asynchronous memory report.
RefPtr<MediaDecoder::ResourceSizes> resourceSizes =
@ -1613,7 +1638,8 @@ MediaMemoryTracker::CollectReports(nsIHandleReportCallback* aHandleReport,
nsCOMPtr<nsISupports> data = aData;
resourceSizes->Promise()->Then(
// Non-DocGroup version of AbstractThread::MainThread is fine for memory report.
// Non-DocGroup version of AbstractThread::MainThread is fine for memory
// report.
AbstractThread::MainThread(),
__func__,
[handleReport, data] (size_t size) {
@ -1633,6 +1659,8 @@ MediaMemoryTracker::CollectReports(nsIHandleReportCallback* aHandleReport,
},
[] (size_t) { /* unused reject function */ });
int64_t video = 0;
int64_t audio = 0;
DecodersArray& decoders = Decoders();
for (size_t i = 0; i < decoders.Length(); ++i) {
MediaDecoder* decoder = decoders[i];
@ -1731,11 +1759,13 @@ MediaDecoder::NextFrameBufferedStatus()
// Use the buffered range to consider if we have the next frame available.
media::TimeUnit currentPosition =
media::TimeUnit::FromMicroseconds(CurrentPosition());
media::TimeInterval interval(currentPosition,
currentPosition + media::TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
media::TimeInterval interval(
currentPosition,
currentPosition
+ media::TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
return GetBuffered().Contains(interval)
? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
: MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
: MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
}
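
The hunk above reduces "is the next frame available" to an interval-containment test against the buffered ranges. A minimal standalone sketch of that test, with simplified stand-ins for media::TimeUnit and media::TimeIntervals (the names and microsecond units here are illustrative assumptions, not Gecko's real types):

#include <cstdint>
#include <vector>

// Simplified stand-in for media::TimeInterval, in microseconds.
struct Interval {
  int64_t mStart;
  int64_t mEnd;
  bool Contains(const Interval& aOther) const {
    return mStart <= aOther.mStart && aOther.mEnd <= mEnd;
  }
};

// The next frame is "available" if some buffered range covers
// [position, position + lookahead], mirroring the logic above.
bool NextFrameBuffered(const std::vector<Interval>& aBuffered,
                       int64_t aPositionUs, int64_t aLookaheadUs)
{
  const Interval wanted{ aPositionUs, aPositionUs + aLookaheadUs };
  for (const Interval& b : aBuffered) {
    if (b.Contains(wanted)) {
      return true;
    }
  }
  return false;
}
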
nsCString

@ -62,7 +62,8 @@ class MediaDecoder : public AbstractMediaDecoder
public:
// Used to register with MediaResource to receive notifications which will
// be forwarded to MediaDecoder.
class ResourceCallback : public MediaResourceCallback {
class ResourceCallback : public MediaResourceCallback
{
// Throttle calls to MediaDecoder::NotifyDataArrived()
// to be at most once per 500ms.
static const uint32_t sDelay = 500;
@ -96,12 +97,15 @@ public:
const RefPtr<AbstractThread> mAbstractMainThread;
};
typedef MozPromise<bool /* aIgnored */, bool /* aIgnored */, /* IsExclusive = */ true> SeekPromise;
typedef MozPromise<bool /* aIgnored */, bool /* aIgnored */,
/* IsExclusive = */ true>
SeekPromise;
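
MozPromise takes a resolve type, a reject type, and an IsExclusive flag (exclusive promises allow exactly one Then() consumer). As a rough standard-C++ analogue of what such a typedef carries, the resolve/reject outcome can be modeled with a variant; this is purely an illustrative assumption, the real MozPromise API is Gecko-specific:

#include <future>
#include <variant>

// Illustrative model: a one-shot result that is either a resolve value
// or a reject value, consumed by exactly one waiter (IsExclusive = true).
template <typename ResolveT, typename RejectT>
using PromiseModel = std::future<std::variant<ResolveT, RejectT>>;

// Analogous shape to SeekPromise above: both payloads are ignored bools.
using SeekPromiseModel = PromiseModel<bool, bool>;
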
NS_DECL_THREADSAFE_ISUPPORTS
// Enumeration for the valid play states (see mPlayState)
enum PlayState {
enum PlayState
{
PLAY_STATE_START,
PLAY_STATE_LOADING,
PLAY_STATE_PAUSED,
@ -207,7 +211,8 @@ public:
// Add an output stream. All decoder output will be sent to the stream.
// The stream is initially blocked. The decoder is responsible for unblocking
// it while it is playing back.
virtual void AddOutputStream(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
virtual void AddOutputStream(ProcessedMediaStream* aStream,
bool aFinishWhenEnded);
// Remove an output stream added with AddOutputStream.
virtual void RemoveOutputStream(MediaStream* aStream);
@ -430,7 +435,9 @@ private:
return mAbstractMainThread;
}
typedef MozPromise<RefPtr<CDMProxy>, bool /* aIgnored */, /* IsExclusive = */ true> CDMProxyPromise;
typedef MozPromise<RefPtr<CDMProxy>, bool /* aIgnored */,
/* IsExclusive = */ true>
CDMProxyPromise;
// Resolved when a CDMProxy is available and the capabilities are known or
// rejected when this decoder is about to shut down.
@ -476,12 +483,15 @@ private:
GetOwner()->UpdateReadyState();
}
virtual MediaDecoderOwner::NextFrameStatus NextFrameStatus() { return mNextFrameStatus; }
virtual MediaDecoderOwner::NextFrameStatus NextFrameStatus()
{
return mNextFrameStatus;
}
virtual MediaDecoderOwner::NextFrameStatus NextFrameBufferedStatus();
// Returns a string describing the state of the media player internal
// data. Used for debugging purposes.
virtual void GetMozDebugReaderData(nsACString& aString) {}
virtual void GetMozDebugReaderData(nsACString& aString) { }
virtual void DumpDebugInfo();
@ -787,45 +797,46 @@ protected:
public:
AbstractCanonical<media::NullableTimeUnit>* CanonicalDurationOrNull() override;
AbstractCanonical<double>* CanonicalVolume() {
return &mVolume;
}
AbstractCanonical<bool>* CanonicalPreservesPitch() {
AbstractCanonical<double>* CanonicalVolume() { return &mVolume; }
AbstractCanonical<bool>* CanonicalPreservesPitch()
{
return &mPreservesPitch;
}
AbstractCanonical<media::NullableTimeUnit>* CanonicalEstimatedDuration() {
AbstractCanonical<media::NullableTimeUnit>* CanonicalEstimatedDuration()
{
return &mEstimatedDuration;
}
AbstractCanonical<Maybe<double>>* CanonicalExplicitDuration() {
AbstractCanonical<Maybe<double>>* CanonicalExplicitDuration()
{
return &mExplicitDuration;
}
AbstractCanonical<PlayState>* CanonicalPlayState() {
return &mPlayState;
}
AbstractCanonical<PlayState>* CanonicalNextPlayState() {
return &mNextState;
}
AbstractCanonical<bool>* CanonicalLogicallySeeking() {
AbstractCanonical<PlayState>* CanonicalPlayState() { return &mPlayState; }
AbstractCanonical<PlayState>* CanonicalNextPlayState() { return &mNextState; }
AbstractCanonical<bool>* CanonicalLogicallySeeking()
{
return &mLogicallySeeking;
}
AbstractCanonical<bool>* CanonicalSameOriginMedia() {
AbstractCanonical<bool>* CanonicalSameOriginMedia()
{
return &mSameOriginMedia;
}
AbstractCanonical<PrincipalHandle>* CanonicalMediaPrincipalHandle() {
AbstractCanonical<PrincipalHandle>* CanonicalMediaPrincipalHandle()
{
return &mMediaPrincipalHandle;
}
AbstractCanonical<double>* CanonicalPlaybackBytesPerSecond() {
AbstractCanonical<double>* CanonicalPlaybackBytesPerSecond()
{
return &mPlaybackBytesPerSecond;
}
AbstractCanonical<bool>* CanonicalPlaybackRateReliable() {
AbstractCanonical<bool>* CanonicalPlaybackRateReliable()
{
return &mPlaybackRateReliable;
}
AbstractCanonical<int64_t>* CanonicalDecoderPosition() {
AbstractCanonical<int64_t>* CanonicalDecoderPosition()
{
return &mDecoderPosition;
}
AbstractCanonical<bool>* CanonicalIsVisible() {
return &mIsVisible;
}
AbstractCanonical<bool>* CanonicalIsVisible() { return &mIsVisible; }
private:
// Notify owner when the audible state changed

@ -29,13 +29,16 @@ class MediaDecoderReader;
struct WaitForDataRejectValue
{
enum Reason {
enum Reason
{
SHUTDOWN,
CANCELED
};
WaitForDataRejectValue(MediaData::Type aType, Reason aReason)
:mType(aType), mReason(aReason) {}
:mType(aType), mReason(aReason)
{
}
MediaData::Type mType;
Reason mReason;
};
@ -43,11 +46,11 @@ struct WaitForDataRejectValue
struct SeekRejectValue
{
MOZ_IMPLICIT SeekRejectValue(const MediaResult& aError)
: mType(MediaData::NULL_DATA), mError(aError) {}
: mType(MediaData::NULL_DATA), mError(aError) { }
MOZ_IMPLICIT SeekRejectValue(nsresult aResult)
: mType(MediaData::NULL_DATA), mError(aResult) {}
: mType(MediaData::NULL_DATA), mError(aResult) { }
SeekRejectValue(MediaData::Type aType, const MediaResult& aError)
: mType(aType), mError(aError) {}
: mType(aType), mError(aError) { }
MediaData::Type mType;
MediaResult mError;
};
@ -60,7 +63,7 @@ public:
nsAutoPtr<MetadataTags> mTags;
private:
virtual ~MetadataHolder() {}
virtual ~MetadataHolder() { }
};
// Encapsulates the decoding and reading of media data. Reading can either
@ -69,7 +72,8 @@ private:
// callback.
// Unless otherwise specified, methods and fields of this class can only
// be accessed on the decode task queue.
class MediaDecoderReader {
class MediaDecoderReader
{
friend class ReRequestVideoWithSkipTask;
friend class ReRequestAudioTask;
@ -104,7 +108,7 @@ public:
// Called by MDSM in dormant state to release resources allocated by this
// reader. The reader can resume decoding by calling Seek() to a specific
// position.
virtual void ReleaseResources() {}
virtual void ReleaseResources() { }
// Destroys the decoding state. The reader cannot be made usable again.
// This is different from ReleaseMediaResources() as it is irreversible,
@ -128,8 +132,9 @@ public:
//
// aParam is a set of TrackInfo::TrackType enums specifying which
// queues need to be reset, defaulting to both audio and video tracks.
virtual nsresult ResetDecode(TrackSet aTracks = TrackSet(TrackInfo::kAudioTrack,
TrackInfo::kVideoTrack));
virtual nsresult ResetDecode(
TrackSet aTracks = TrackSet(TrackInfo::kAudioTrack,
TrackInfo::kVideoTrack));
// Requests one audio sample from the reader.
//
@ -230,7 +235,7 @@ public:
return mTimedMetadataEvent;
}
// Notified by the OggReader during playback when chained ogg is detected.
// Notified by the OggDemuxer during playback when chained ogg is detected.
MediaEventSource<void>& OnMediaNotSeekable() { return mOnMediaNotSeekable; }
TimedMetadataEventProducer& TimedMetadataProducer()
@ -243,10 +248,22 @@ public:
return mOnMediaNotSeekable;
}
// Notified if the reader can't decode a sample due to a missing decryption
// key.
MediaEventSource<TrackInfo::TrackType>& OnTrackWaitingForKey()
{
return mOnTrackWaitingForKey;
}
MediaEventProducer<TrackInfo::TrackType>& OnTrackWaitingForKeyProducer()
{
return mOnTrackWaitingForKey;
}
// Switch the video decoder to BlankDecoderModule. It may take effect only
// a few samples later, depending on how many demuxed samples are already
// queued in the original video decoder.
virtual void SetVideoBlankDecode(bool aIsBlankDecode) {}
virtual void SetVideoBlankDecode(bool aIsBlankDecode) { }
protected:
virtual ~MediaDecoderReader();
@ -306,6 +323,9 @@ protected:
// Notify if this media is not seekable.
MediaEventProducer<void> mOnMediaNotSeekable;
// Notify if we are waiting for a decryption key.
MediaEventProducer<TrackInfo::TrackType> mOnTrackWaitingForKey;
private:
virtual nsresult InitInternal() { return NS_OK; }
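
The OnTrackWaitingForKey()/OnTrackWaitingForKeyProducer() pair added above follows Gecko's producer/source split: the reader owns the producer and fires it when a sample can't be decrypted, while consumers only get a source to listen on. A hand-rolled sketch of that pattern in standard C++ (a simplification, not the real MediaEventSource API):

#include <functional>
#include <iostream>
#include <utility>
#include <vector>

enum class TrackType { Audio, Video };

// Producer side: owns the listener list; Notify() fans out to listeners.
template <typename T>
class EventProducer {
public:
  void Connect(std::function<void(T)> aListener) {
    mListeners.push_back(std::move(aListener));
  }
  void Notify(T aValue) {
    for (auto& listener : mListeners) {
      listener(aValue);
    }
  }
private:
  std::vector<std::function<void(T)>> mListeners;
};

int main() {
  EventProducer<TrackType> waitingForKey;
  // A consumer (e.g. the state machine) subscribes for "waiting for key".
  waitingForKey.Connect([](TrackType aType) {
    std::cout << (aType == TrackType::Video ? "video" : "audio")
              << " track is waiting for a decryption key\n";
  });
  waitingForKey.Notify(TrackType::Video); // fired when decryption stalls
}
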

[File diff not shown because of its large size.]
@ -114,7 +114,8 @@ class TaskQueue;
extern LazyLogModule gMediaDecoderLog;
extern LazyLogModule gMediaSampleLog;
enum class MediaEventType : int8_t {
enum class MediaEventType : int8_t
{
PlaybackStarted,
PlaybackStopped,
PlaybackEnded,
@ -150,7 +151,8 @@ public:
nsresult Init(MediaDecoder* aDecoder);
// Enumeration for the valid decoding states
enum State {
enum State
{
DECODER_STATE_DECODING_METADATA,
DECODER_STATE_WAIT_FOR_CDM,
DECODER_STATE_DORMANT,
@ -430,7 +432,8 @@ protected:
// [mStartTime, mEndTime], and mStartTime will not be 0 if the media does
// not start at 0. Note this is different from the "current playback position",
// which is in the range [0,duration].
int64_t GetMediaTime() const {
int64_t GetMediaTime() const
{
MOZ_ASSERT(OnTaskQueue());
return mCurrentPosition;
}
@ -494,7 +497,11 @@ private:
UniquePtr<StateObject> mStateObj;
media::TimeUnit Duration() const { MOZ_ASSERT(OnTaskQueue()); return mDuration.Ref().ref(); }
media::TimeUnit Duration() const
{
MOZ_ASSERT(OnTaskQueue());
return mDuration.Ref().ref();
}
// Recomputes the canonical duration from various sources.
void RecomputeDuration();
@ -515,8 +522,8 @@ private:
bool IsLogicallyPlaying()
{
MOZ_ASSERT(OnTaskQueue());
return mPlayState == MediaDecoder::PLAY_STATE_PLAYING ||
mNextPlayState == MediaDecoder::PLAY_STATE_PLAYING;
return mPlayState == MediaDecoder::PLAY_STATE_PLAYING
|| mNextPlayState == MediaDecoder::PLAY_STATE_PLAYING;
}
// Media Fragment end time in microseconds. Access controlled by decoder monitor.
@ -751,22 +758,25 @@ private:
public:
AbstractCanonical<media::TimeIntervals>* CanonicalBuffered() const;
AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration() {
AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration()
{
return &mDuration;
}
AbstractCanonical<bool>* CanonicalIsShutdown() {
return &mIsShutdown;
}
AbstractCanonical<NextFrameStatus>* CanonicalNextFrameStatus() {
AbstractCanonical<bool>* CanonicalIsShutdown() { return &mIsShutdown; }
AbstractCanonical<NextFrameStatus>* CanonicalNextFrameStatus()
{
return &mNextFrameStatus;
}
AbstractCanonical<int64_t>* CanonicalCurrentPosition() {
AbstractCanonical<int64_t>* CanonicalCurrentPosition()
{
return &mCurrentPosition;
}
AbstractCanonical<int64_t>* CanonicalPlaybackOffset() {
AbstractCanonical<int64_t>* CanonicalPlaybackOffset()
{
return &mPlaybackOffset;
}
AbstractCanonical<bool>* CanonicalIsAudioDataAudible() {
AbstractCanonical<bool>* CanonicalIsAudioDataAudible()
{
return &mIsAudioDataAudible;
}
};

[File diff not shown because of its large size.]
@ -109,13 +109,15 @@ private:
void DecodeDemuxedSamples(TrackType aTrack,
MediaRawData* aSample);
struct InternalSeekTarget {
struct InternalSeekTarget
{
InternalSeekTarget(const media::TimeInterval& aTime, bool aDropTarget)
: mTime(aTime)
, mDropTarget(aDropTarget)
, mWaiting(false)
, mHasSeeked(false)
{}
{
}
media::TimeUnit Time() const { return mTime.mStart; }
media::TimeUnit EndTime() const { return mTime.mEnd; }
@ -136,8 +138,8 @@ private:
// Drain the current decoder.
void DrainDecoder(TrackType aTrack);
void NotifyNewOutput(TrackType aTrack, MediaData* aSample);
void NotifyInputExhausted(TrackType aTrack);
void NotifyNewOutput(TrackType aTrack,
const MediaDataDecoder::DecodedData& aResults);
void NotifyDrainComplete(TrackType aTrack);
void NotifyError(TrackType aTrack, const MediaResult& aError);
void NotifyWaitingForData(TrackType aTrack);
@ -149,15 +151,8 @@ private:
// Initializes mLayersBackendType if possible.
void InitLayersBackendType();
// DecoderCallback proxies the MediaDataDecoderCallback calls to these
// functions.
void Output(TrackType aType, MediaData* aSample);
void InputExhausted(TrackType aTrack);
void Error(TrackType aTrack, const MediaResult& aError);
void Reset(TrackType aTrack);
void DrainComplete(TrackType aTrack);
void DropDecodedSamples(TrackType aTrack);
void WaitingForKey(TrackType aTrack);
bool ShouldSkip(bool aSkipToNextKeyframe, media::TimeUnit aTimeThreshold);
@ -167,41 +162,8 @@ private:
RefPtr<PDMFactory> mPlatform;
class DecoderCallback : public MediaDataDecoderCallback {
public:
DecoderCallback(MediaFormatReader* aReader, TrackType aType)
: mReader(aReader)
, mType(aType)
{
}
void Output(MediaData* aSample) override {
mReader->Output(mType, aSample);
}
void InputExhausted() override {
mReader->InputExhausted(mType);
}
void Error(const MediaResult& aError) override {
mReader->Error(mType, aError);
}
void DrainComplete() override {
mReader->DrainComplete(mType);
}
void ReleaseMediaResources() override {
mReader->ReleaseResources();
}
bool OnReaderTaskQueue() override {
return mReader->OnTaskQueue();
}
void WaitingForKey() override {
mReader->WaitingForKey(mType);
}
private:
MediaFormatReader* mReader;
TrackType mType;
};
struct DecoderData {
struct DecoderData
{
DecoderData(MediaFormatReader* aOwner,
MediaData::Type aType,
uint32_t aNumOfMaxError)
@ -214,11 +176,10 @@ private:
, mWaitingForData(false)
, mWaitingForKey(false)
, mReceivedNewData(false)
, mOutputRequested(false)
, mDecodePending(false)
, mNeedDraining(false)
, mDraining(false)
, mDrainComplete(false)
, mFlushed(true)
, mNumOfConsecutiveError(0)
, mMaxConsecutiveError(aNumOfMaxError)
, mNumSamplesInput(0)
@ -229,7 +190,8 @@ private:
, mIsHardwareAccelerated(false)
, mLastStreamSourceID(UINT32_MAX)
, mIsBlankDecode(false)
{}
{
}
MediaFormatReader* mOwner;
// Disambiguate Audio vs Video.
@ -238,8 +200,6 @@ private:
// TaskQueue on which the decoder can choose to decode.
// Only non-null up until the decoder is created.
RefPtr<TaskQueue> mTaskQueue;
// Callback that receives output and error notifications from the decoder.
nsAutoPtr<DecoderCallback> mCallback;
// Mutex protecting mDescription and mDecoder.
Mutex mMutex;
@ -250,7 +210,19 @@ private:
{
MutexAutoLock lock(mMutex);
if (mDecoder) {
mDecoder->Shutdown();
RefPtr<MediaFormatReader> owner = mOwner;
TrackType type = mType == MediaData::AUDIO_DATA
? TrackType::kAudioTrack
: TrackType::kVideoTrack;
mDecoder->Shutdown()
->Then(mOwner->OwnerThread(), __func__,
[owner, this, type]() {
mShutdownRequest.Complete();
mShutdownPromise.ResolveIfExists(true, __func__);
owner->ScheduleUpdate(type);
},
[]() { MOZ_RELEASE_ASSERT(false, "Can't ever be here"); })
->Track(mShutdownRequest);
}
mDescription = "shutdown";
mDecoder = nullptr;
@ -284,16 +256,16 @@ private:
}
// MediaDataDecoder handler's variables.
bool mOutputRequested;
// Set to true once the MediaDataDecoder has been fed a compressed sample.
// No more samples will be passed to the decoder while true.
// mDecodePending is reset when:
// 1- The decoder calls InputExhausted
// 2- The decoder is Flushed or Reset.
bool mDecodePending;
MozPromiseRequestHolder<MediaDataDecoder::DecodePromise> mDecodeRequest;
bool mNeedDraining;
MozPromiseRequestHolder<MediaDataDecoder::DecodePromise> mDrainRequest;
bool mDraining;
bool mDrainComplete;
MozPromiseRequestHolder<MediaDataDecoder::FlushPromise> mFlushRequest;
// Set to true if the last operation run on the decoder was a flush.
bool mFlushed;
MozPromiseHolder<ShutdownPromise> mShutdownPromise;
MozPromiseRequestHolder<ShutdownPromise> mShutdownRequest;
bool HasPendingDrain() const
{
@ -357,26 +329,51 @@ private:
}
// Flush the decoder if present and reset decoding related data.
// Decoding will be suspended until mInputRequested is set again.
// Following a flush, the decoder is ready to accept any new data.
void Flush()
{
if (mDecoder) {
mDecoder->Flush();
if (mFlushRequest.Exists() || mFlushed) {
// Flush still pending or already flushed, nothing more to do.
return;
}
mOutputRequested = false;
mDecodePending = false;
mDecodeRequest.DisconnectIfExists();
mDrainRequest.DisconnectIfExists();
mOutput.Clear();
mNumSamplesInput = 0;
mNumSamplesOutput = 0;
mSizeOfQueue = 0;
mDraining = false;
mDrainComplete = false;
if (mDecoder && !mFlushed) {
RefPtr<MediaFormatReader> owner = mOwner;
TrackType type = mType == MediaData::AUDIO_DATA
? TrackType::kAudioTrack
: TrackType::kVideoTrack;
mDecoder->Flush()
->Then(mOwner->OwnerThread(), __func__,
[owner, type, this]() {
mFlushRequest.Complete();
if (!mShutdownPromise.IsEmpty()) {
ShutdownDecoder();
return;
}
owner->ScheduleUpdate(type);
},
[owner, type, this](const MediaResult& aError) {
mFlushRequest.Complete();
if (!mShutdownPromise.IsEmpty()) {
ShutdownDecoder();
return;
}
owner->NotifyError(type, aError);
})
->Track(mFlushRequest);
}
mFlushed = true;
}
// Reset the state of the DecoderData, clearing all queued frames
// (pending demuxed and decoded).
// Decoding will be suspended until mInputRequested is set again.
// The track demuxer is *not* reset.
void ResetState()
{
@ -385,9 +382,9 @@ private:
mWaitingForData = false;
mWaitingForKey = false;
mQueuedSamples.Clear();
mOutputRequested = false;
mNeedDraining = false;
mDecodePending = false;
mDecodeRequest.DisconnectIfExists();
mDrainRequest.DisconnectIfExists();
mDraining = false;
mDrainComplete = false;
mTimeThreshold.reset();
@ -426,15 +423,16 @@ private:
};
class DecoderDataWithPromise : public DecoderData {
class DecoderDataWithPromise : public DecoderData
{
public:
DecoderDataWithPromise(MediaFormatReader* aOwner,
MediaData::Type aType,
uint32_t aNumOfMaxError)
: DecoderData(aOwner, aType, aNumOfMaxError)
, mHasPromise(false)
{}
{
}
bool HasPromise() const override
{
@ -571,6 +569,7 @@ private:
UniquePtr<DecoderFactory> mDecoderFactory;
MediaEventListener mCompositorUpdatedListener;
MediaEventListener mOnTrackWaitingForKeyListener;
void OnFirstDemuxCompleted(TrackInfo::TrackType aType,
RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples);
@ -583,6 +582,11 @@ private:
// A flag indicating if the start time is known or not.
bool mHasStartTime = false;
void ShutdownDecoder(TrackType aTrack);
RefPtr<ShutdownPromise> ShutdownDecoderWithPromise(TrackType aTrack);
void TearDownDecoders();
MozPromiseHolder<ShutdownPromise> mShutdownPromise;
};
} // namespace mozilla
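
A recurring shape in the MediaFormatReader changes above: an async operation is started at most once, tracked in a MozPromiseRequestHolder-style guard, and Complete()d from its continuation. A compact sketch of that guard logic with plain callbacks (an assumed simplification; the real code resolves on the reader's task queue and also handles the error arm):

#include <functional>
#include <iostream>

// Models MozPromiseRequestHolder: remembers one outstanding request.
struct RequestHolder {
  bool mActive = false;
  void Track() { mActive = true; }
  void Complete() { mActive = false; }
  bool Exists() const { return mActive; }
};

struct DecoderData {
  RequestHolder mFlushRequest;
  bool mFlushed = true; // the last operation run on the decoder was a flush

  // aAsyncFlush stands in for MediaDataDecoder::Flush(); it invokes the
  // given continuation once the flush has finished.
  void Flush(const std::function<void(std::function<void()>)>& aAsyncFlush) {
    if (mFlushRequest.Exists() || mFlushed) {
      return; // flush still pending or already flushed, nothing more to do
    }
    mFlushRequest.Track();
    aAsyncFlush([this]() {
      mFlushRequest.Complete();
      std::cout << "flush complete; decoder accepts new data\n";
    });
    mFlushed = true;
  }
};

int main() {
  DecoderData data;
  data.mFlushed = false; // pretend samples were decoded since the last flush
  auto runInline = [](std::function<void()> aDone) { aDone(); };
  data.Flush(runInline);
  data.Flush(runInline); // second call is a no-op: already flushed
}
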

@ -23,13 +23,15 @@ class AudioInfo;
class VideoInfo;
class TextInfo;
class MetadataTag {
class MetadataTag
{
public:
MetadataTag(const nsACString& aKey,
const nsACString& aValue)
: mKey(aKey)
, mValue(aValue)
{}
{
}
nsCString mKey;
nsCString mValue;
};
@ -37,9 +39,11 @@ public:
// Maximum channel number we can currently handle (7.1)
#define MAX_AUDIO_CHANNELS 8
class TrackInfo {
class TrackInfo
{
public:
enum TrackType {
enum TrackType
{
kUndefinedTrack,
kAudioTrack,
kVideoTrack,
@ -175,9 +179,11 @@ private:
};
// Stores info relevant to presenting media frames.
class VideoInfo : public TrackInfo {
class VideoInfo : public TrackInfo
{
public:
enum Rotation {
enum Rotation
{
kDegree_0 = 0,
kDegree_90 = 90,
kDegree_180 = 180,
@ -272,8 +278,9 @@ public:
// container.
nsIntRect ScaledImageRect(int64_t aWidth, int64_t aHeight) const
{
if ((aWidth == mImage.width && aHeight == mImage.height) ||
!mImage.width || !mImage.height) {
if ((aWidth == mImage.width && aHeight == mImage.height)
|| !mImage.width
|| !mImage.height) {
return ImageRect();
}
nsIntRect imageRect = ImageRect();
@ -325,7 +332,8 @@ private:
bool mAlphaPresent = false;
};
class AudioInfo : public TrackInfo {
class AudioInfo : public TrackInfo
{
public:
AudioInfo()
: TrackInfo(kAudioTrack, NS_LITERAL_STRING("1"), NS_LITERAL_STRING("main"),
@ -392,17 +400,18 @@ public:
RefPtr<MediaByteBuffer> mCodecSpecificConfig;
RefPtr<MediaByteBuffer> mExtraData;
};
class EncryptionInfo {
class EncryptionInfo
{
public:
EncryptionInfo()
: mEncrypted(false)
{
}
struct InitData {
struct InitData
{
template<typename AInitDatas>
InitData(const nsAString& aType, AInitDatas&& aInitData)
: mType(aType)
@ -449,7 +458,8 @@ private:
bool mEncrypted;
};
class MediaInfo {
class MediaInfo
{
public:
bool HasVideo() const
{
@ -484,8 +494,8 @@ public:
bool IsEncrypted() const
{
return (HasAudio() && mAudio.mCrypto.mValid) ||
(HasVideo() && mVideo.mCrypto.mValid);
return (HasAudio() && mAudio.mCrypto.mValid)
|| (HasVideo() && mVideo.mCrypto.mValid);
}
bool HasValidMedia() const
@ -499,8 +509,9 @@ public:
"Audio track ID must be valid");
NS_ASSERTION(!HasVideo() || mVideo.mTrackId != TRACK_INVALID,
"Audio track ID must be valid");
NS_ASSERTION(!HasAudio() || !HasVideo() ||
mAudio.mTrackId != mVideo.mTrackId,
NS_ASSERTION(!HasAudio()
|| !HasVideo()
|| mAudio.mTrackId != mVideo.mTrackId,
"Duplicate track IDs");
}
@ -529,7 +540,8 @@ public:
media::TimeUnit mStartTime;
};
class SharedTrackInfo {
class SharedTrackInfo
{
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SharedTrackInfo)
public:
SharedTrackInfo(const TrackInfo& aOriginal, uint32_t aStreamID)
@ -571,7 +583,7 @@ public:
}
private:
~SharedTrackInfo() {};
~SharedTrackInfo() { }
UniquePtr<TrackInfo> mInfo;
// A unique ID, guaranteed to change when changing streams.
uint32_t mStreamSourceID;
@ -580,9 +592,11 @@ public:
const nsCString& mMimeType;
};
class AudioConfig {
class AudioConfig
{
public:
enum Channel {
enum Channel
{
CHANNEL_INVALID = -1,
CHANNEL_MONO = 0,
CHANNEL_LEFT,
@ -596,15 +610,14 @@ public:
CHANNEL_LFE,
};
class ChannelLayout {
class ChannelLayout
{
public:
ChannelLayout()
: mChannelMap(0)
, mValid(false)
{}
ChannelLayout() : mChannelMap(0), mValid(false) { }
explicit ChannelLayout(uint32_t aChannels)
: ChannelLayout(aChannels, SMPTEDefault(aChannels))
{}
{
}
ChannelLayout(uint32_t aChannels, const Channel* aConfig)
: ChannelLayout()
{
@ -645,9 +658,7 @@ public:
// the current layout can be easily reordered to aOther.
// aMap must be an array of size MAX_AUDIO_CHANNELS.
bool MappingTable(const ChannelLayout& aOther, uint8_t* aMap = nullptr) const;
bool IsValid() const {
return mValid;
}
bool IsValid() const { return mValid; }
bool HasChannel(Channel aChannel) const
{
return mChannelMap & (1 << aChannel);
@ -660,7 +671,8 @@ public:
bool mValid;
};
enum SampleFormat {
enum SampleFormat
{
FORMAT_NONE = 0,
FORMAT_U8,
FORMAT_S16,
@ -710,9 +722,10 @@ public:
}
bool operator==(const AudioConfig& aOther) const
{
return mChannelLayout == aOther.mChannelLayout &&
mRate == aOther.mRate && mFormat == aOther.mFormat &&
mInterleaved == aOther.mInterleaved;
return mChannelLayout == aOther.mChannelLayout
&& mRate == aOther.mRate
&& mFormat == aOther.mFormat
&& mInterleaved == aOther.mInterleaved;
}
bool operator!=(const AudioConfig& aOther) const
{

@ -90,8 +90,12 @@ private:
DECL_MEDIA_PREF("accessibility.monoaudio.enable", MonoAudio, bool, false);
DECL_MEDIA_PREF("media.resampling.enabled", AudioSinkResampling, bool, false);
DECL_MEDIA_PREF("media.resampling.rate", AudioSinkResampleRate, uint32_t, 48000);
#if defined(XP_WIN)
// Enable multiple channel support on Windows.
DECL_MEDIA_PREF("media.forcestereo.enabled", AudioSinkForceStereo, bool, false);
#else
DECL_MEDIA_PREF("media.forcestereo.enabled", AudioSinkForceStereo, bool, true);
#endif
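
The #if block above gives media.forcestereo.enabled a different default per platform: off on Windows (where multichannel output is supported), on elsewhere. The same idea in isolation, assuming a plain bool default rather than the DECL_MEDIA_PREF machinery:

// Hypothetical stand-alone equivalent of the per-platform default above.
constexpr bool DefaultForceStereo()
{
#if defined(XP_WIN)
  return false; // Windows: leave multichannel output enabled
#else
  return true;  // elsewhere: downmix to stereo by default
#endif
}
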
// VideoSink
DECL_MEDIA_PREF("media.ruin-av-sync.enabled", RuinAvSync, bool, false);

@ -705,8 +705,8 @@ private:
{
MOZ_ASSERT(mRecorder->mAudioNode != nullptr);
nsIDocument* doc = mRecorder->mAudioNode->GetOwner()
? mRecorder->mAudioNode->GetOwner()->GetExtantDoc()
: nullptr;
? mRecorder->mAudioNode->GetOwner()->GetExtantDoc()
: nullptr;
nsCOMPtr<nsIPrincipal> principal = doc ? doc->NodePrincipal() : nullptr;
return PrincipalSubsumes(principal);
}

@ -207,8 +207,10 @@ MediaStreamGraphImpl::ExtractPendingInput(SourceMediaStream* aStream,
// The logic is different from the manipulating of aStream->mTracks part.
// So it is not combined with the manipulating of aStream->mTracks part.
StreamTime offset = (data->mCommands & SourceMediaStream::TRACK_CREATE)
? data->mStart : aStream->mTracks.FindTrack(data->mID)->GetSegment()->GetDuration();
StreamTime offset =
(data->mCommands & SourceMediaStream::TRACK_CREATE)
? data->mStart
: aStream->mTracks.FindTrack(data->mID)->GetSegment()->GetDuration();
// Audio case.
if (data->mData->GetType() == MediaSegment::AUDIO) {
@ -395,13 +397,14 @@ MediaStreamGraphImpl::ProcessChunkMetadataForInterval(MediaStream* aStream,
PrincipalHandle principalHandle = chunk->GetPrincipalHandle();
if (principalHandle != aSegment.GetLastPrincipalHandle()) {
aSegment.SetLastPrincipalHandle(principalHandle);
STREAM_LOG(LogLevel::Debug, ("MediaStream %p track %d, principalHandle "
"changed in %sChunk with duration %lld",
aStream, aTrackID,
aSegment.GetType() == MediaSegment::AUDIO
? "Audio" : "Video",
(long long) chunk->GetDuration()));
for (const TrackBound<MediaStreamTrackListener>& listener : aStream->mTrackListeners) {
STREAM_LOG(LogLevel::Debug,
("MediaStream %p track %d, principalHandle "
"changed in %sChunk with duration %lld",
aStream, aTrackID,
aSegment.GetType() == MediaSegment::AUDIO ? "Audio" : "Video",
(long long)chunk->GetDuration()));
for (const TrackBound<MediaStreamTrackListener>& listener :
aStream->mTrackListeners) {
if (listener.mTrackID == aTrackID) {
listener.mListener->NotifyPrincipalHandleChanged(this, principalHandle);
}
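
The loop above notifies only the listeners bound to the track whose principal changed. Stripped of the stream-graph types, the pattern is a filtered observer fan-out (names here are illustrative, not the real MediaStreamGraph API):

#include <cstdint>
#include <functional>
#include <vector>

using TrackID = int32_t;

struct TrackBoundListener {
  TrackID mTrackID;
  std::function<void(int aPrincipalHandle)> mOnPrincipalChanged;
};

// Notify only listeners bound to aTrackID, as in the loop above.
void NotifyPrincipalChanged(const std::vector<TrackBoundListener>& aListeners,
                            TrackID aTrackID, int aPrincipalHandle)
{
  for (const TrackBoundListener& listener : aListeners) {
    if (listener.mTrackID == aTrackID) {
      listener.mOnPrincipalChanged(aPrincipalHandle);
    }
  }
}
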

@ -55,8 +55,8 @@ DirectMediaStreamTrackListener::NotifyRealtimeTrackDataAndApplyTrackDisabling(Me
}
DisabledTrackMode mode = mDisabledBlackCount > 0
? DisabledTrackMode::SILENCE_BLACK
: DisabledTrackMode::SILENCE_FREEZE;
? DisabledTrackMode::SILENCE_BLACK
: DisabledTrackMode::SILENCE_FREEZE;
if (!mMedia) {
mMedia = aMedia.CreateEmptyClone();
}

@ -124,9 +124,8 @@ public:
// 1- coded sample number if blocksize is variable or
// 2- coded frame number if blocksize is known.
// A frame is made of Blocksize samples.
mIndex = mVariableBlockSize
? frame_or_sample_num
: frame_or_sample_num * mBlocksize;
mIndex = mVariableBlockSize ? frame_or_sample_num
: frame_or_sample_num * mBlocksize;
// Sample rate.
if (sr_code < 12) {

@ -36,8 +36,8 @@ struct MediaRawDataIPDL
Shmem buffer;
};
// This protocol provides a way to use MediaDataDecoder/MediaDataDecoderCallback
// across processes. The parent side currently is only implemented to work with
// This protocol provides a way to use MediaDataDecoder across processes.
// The parent side currently is only implemented to work with
// Windows Media Foundation, but can be extended easily to support other backends.
// The child side runs in the content process, and the parent side runs in the
// GPU process. We run a separate IPDL thread for both sides.

@ -21,13 +21,9 @@ using namespace ipc;
using namespace layers;
using namespace gfx;
RemoteVideoDecoder::RemoteVideoDecoder(MediaDataDecoderCallback* aCallback)
RemoteVideoDecoder::RemoteVideoDecoder()
: mActor(new VideoDecoderChild())
{
#ifdef DEBUG
mCallback = aCallback;
#endif
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
}
RemoteVideoDecoder::~RemoteVideoDecoder()
@ -54,86 +50,63 @@ RemoteVideoDecoder::~RemoteVideoDecoder()
RefPtr<MediaDataDecoder::InitPromise>
RemoteVideoDecoder::Init()
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
RefPtr<RemoteVideoDecoder> self = this;
return InvokeAsync(VideoDecoderManagerChild::GetManagerAbstractThread(),
this, __func__, &RemoteVideoDecoder::InitInternal);
__func__, [self, this]() { return mActor->Init(); });
}
RefPtr<MediaDataDecoder::InitPromise>
RemoteVideoDecoder::InitInternal()
RefPtr<MediaDataDecoder::DecodePromise>
RemoteVideoDecoder::Decode(MediaRawData* aSample)
{
MOZ_ASSERT(mActor);
MOZ_ASSERT(NS_GetCurrentThread() == VideoDecoderManagerChild::GetManagerThread());
return mActor->Init();
}
void
RemoteVideoDecoder::Input(MediaRawData* aSample)
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
RefPtr<RemoteVideoDecoder> self = this;
RefPtr<MediaRawData> sample = aSample;
VideoDecoderManagerChild::GetManagerThread()->Dispatch(NS_NewRunnableFunction([self, sample]() {
MOZ_ASSERT(self->mActor);
self->mActor->Input(sample);
}), NS_DISPATCH_NORMAL);
return InvokeAsync(VideoDecoderManagerChild::GetManagerAbstractThread(),
__func__,
[self, this, sample]() { return mActor->Decode(sample); });
}
void
RefPtr<MediaDataDecoder::FlushPromise>
RemoteVideoDecoder::Flush()
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
SynchronousTask task("Decoder flush");
VideoDecoderManagerChild::GetManagerThread()->Dispatch(NS_NewRunnableFunction([&]() {
MOZ_ASSERT(this->mActor);
this->mActor->Flush(&task);
}), NS_DISPATCH_NORMAL);
task.Wait();
RefPtr<RemoteVideoDecoder> self = this;
return InvokeAsync(VideoDecoderManagerChild::GetManagerAbstractThread(),
__func__, [self, this]() { return mActor->Flush(); });
}
void
RefPtr<MediaDataDecoder::DecodePromise>
RemoteVideoDecoder::Drain()
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
RefPtr<RemoteVideoDecoder> self = this;
VideoDecoderManagerChild::GetManagerThread()->Dispatch(NS_NewRunnableFunction([self]() {
MOZ_ASSERT(self->mActor);
self->mActor->Drain();
}), NS_DISPATCH_NORMAL);
return InvokeAsync(VideoDecoderManagerChild::GetManagerAbstractThread(),
__func__, [self, this]() { return mActor->Drain(); });
}
void
RefPtr<ShutdownPromise>
RemoteVideoDecoder::Shutdown()
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
SynchronousTask task("Shutdown");
RefPtr<RemoteVideoDecoder> self = this;
VideoDecoderManagerChild::GetManagerThread()->Dispatch(NS_NewRunnableFunction([&]() {
AutoCompleteTask complete(&task);
MOZ_ASSERT(self->mActor);
self->mActor->Shutdown();
}), NS_DISPATCH_NORMAL);
task.Wait();
return InvokeAsync(VideoDecoderManagerChild::GetManagerAbstractThread(),
__func__, [self, this]() {
mActor->Shutdown();
return ShutdownPromise::CreateAndResolve(true, __func__);
});
}
bool
RemoteVideoDecoder::IsHardwareAccelerated(nsACString& aFailureReason) const
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
return mActor->IsHardwareAccelerated(aFailureReason);
}
void
RemoteVideoDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
RefPtr<RemoteVideoDecoder> self = this;
media::TimeUnit time = aTime;
VideoDecoderManagerChild::GetManagerThread()->Dispatch(NS_NewRunnableFunction([=]() {
MOZ_ASSERT(self->mActor);
self->mActor->SetSeekThreshold(time);
}), NS_DISPATCH_NORMAL);
}
nsresult
@ -174,16 +147,13 @@ RemoteDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
return mWrapped->CreateVideoDecoder(aParams);
}
MediaDataDecoderCallback* callback = aParams.mCallback;
MOZ_ASSERT(callback->OnReaderTaskQueue());
RefPtr<RemoteVideoDecoder> object = new RemoteVideoDecoder(callback);
RefPtr<RemoteVideoDecoder> object = new RemoteVideoDecoder();
SynchronousTask task("InitIPDL");
bool success;
VideoDecoderManagerChild::GetManagerThread()->Dispatch(NS_NewRunnableFunction([&]() {
AutoCompleteTask complete(&task);
success = object->mActor->InitIPDL(callback,
aParams.VideoConfig(),
success = object->mActor->InitIPDL(aParams.VideoConfig(),
aParams.mKnowsCompositor->GetTextureFactoryIdentifier());
}), NS_DISPATCH_NORMAL);
task.Wait();
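
Every public RemoteVideoDecoder entry point above now hops to the manager thread via InvokeAsync and returns the promise produced there. A standard-C++ model of that hop using std::async in place of the manager thread (an illustrative assumption; Gecko's InvokeAsync dispatches to a specific AbstractThread rather than spawning a new one):

#include <future>
#include <iostream>
#include <string>

// Model of InvokeAsync: run aFunc "on another thread" and hand the caller
// a future for its result, like returning a MozPromise.
template <typename Func>
auto InvokeAsyncModel(Func aFunc) -> std::future<decltype(aFunc())>
{
  return std::async(std::launch::async, std::move(aFunc));
}

int main()
{
  // E.g. Flush(): marshal the call, then wait on the returned "promise".
  std::future<std::string> flushed =
      InvokeAsyncModel([]() { return std::string("flush done"); });
  std::cout << flushed.get() << '\n';
}
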

@ -28,28 +28,23 @@ public:
// MediaDataDecoder
RefPtr<InitPromise> Init() override;
void Input(MediaRawData* aSample) override;
void Flush() override;
void Drain() override;
void Shutdown() override;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
RefPtr<DecodePromise> Drain() override;
RefPtr<FlushPromise> Flush() override;
RefPtr<ShutdownPromise> Shutdown() override;
bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
void SetSeekThreshold(const media::TimeUnit& aTime) override;
const char* GetDescriptionName() const override { return "RemoteVideoDecoder"; }
private:
explicit RemoteVideoDecoder(MediaDataDecoderCallback* aCallback);
RemoteVideoDecoder();
~RemoteVideoDecoder();
RefPtr<InitPromise> InitInternal();
// Only ever written to from the reader task queue (during the constructor and destructor
// when we can guarantee no other threads are accessing it). Only read from the manager
// thread.
// Only ever written to from the reader task queue (during the constructor and
// destructor when we can guarantee no other threads are accessing it). Only
// read from the manager thread.
RefPtr<VideoDecoderChild> mActor;
#ifdef DEBUG
MediaDataDecoderCallback* mCallback;
#endif
};
// A PDM implementation that creates RemoteVideoDecoders.

@ -10,7 +10,6 @@
#include "MediaInfo.h"
#include "ImageContainer.h"
#include "GPUVideoImage.h"
#include "mozilla/layers/SynchronousTask.h"
namespace mozilla {
namespace dom {
@ -22,10 +21,10 @@ using namespace gfx;
VideoDecoderChild::VideoDecoderChild()
: mThread(VideoDecoderManagerChild::GetManagerThread())
, mFlushTask(nullptr)
, mCanSend(false)
, mInitialized(false)
, mIsHardwareAccelerated(false)
, mNeedNewDecoder(false)
{
}
@ -54,9 +53,7 @@ VideoDecoderChild::RecvOutput(const VideoDataIPDL& aData)
aData.base().keyframe(),
aData.base().timecode(),
IntRect());
if (mCallback) {
mCallback->Output(video);
}
mDecodedData.AppendElement(Move(video));
return IPC_OK();
}
@ -64,9 +61,8 @@ mozilla::ipc::IPCResult
VideoDecoderChild::RecvInputExhausted()
{
AssertOnManagerThread();
if (mCallback) {
mCallback->InputExhausted();
}
mDecodePromise.ResolveIfExists(mDecodedData, __func__);
mDecodedData.Clear();
return IPC_OK();
}
@ -74,9 +70,8 @@ mozilla::ipc::IPCResult
VideoDecoderChild::RecvDrainComplete()
{
AssertOnManagerThread();
if (mCallback) {
mCallback->DrainComplete();
}
mDrainPromise.ResolveIfExists(mDecodedData, __func__);
mDecodedData.Clear();
return IPC_OK();
}
@ -84,14 +79,16 @@ mozilla::ipc::IPCResult
VideoDecoderChild::RecvError(const nsresult& aError)
{
AssertOnManagerThread();
if (mCallback) {
mCallback->Error(aError);
}
mDecodedData.Clear();
mDecodePromise.RejectIfExists(aError, __func__);
mDrainPromise.RejectIfExists(aError, __func__);
mFlushPromise.RejectIfExists(aError, __func__);
return IPC_OK();
}
mozilla::ipc::IPCResult
VideoDecoderChild::RecvInitComplete(const bool& aHardware, const nsCString& aHardwareReason)
VideoDecoderChild::RecvInitComplete(const bool& aHardware,
const nsCString& aHardwareReason)
{
AssertOnManagerThread();
mInitPromise.ResolveIfExists(TrackInfo::kVideoTrack, __func__);
@ -112,9 +109,8 @@ VideoDecoderChild::RecvInitFailed(const nsresult& aReason)
mozilla::ipc::IPCResult
VideoDecoderChild::RecvFlushComplete()
{
MOZ_ASSERT(mFlushTask);
AutoCompleteTask complete(mFlushTask);
mFlushTask = nullptr;
AssertOnManagerThread();
mFlushPromise.ResolveIfExists(true, __func__);
return IPC_OK();
}
@ -126,39 +122,45 @@ VideoDecoderChild::ActorDestroy(ActorDestroyReason aWhy)
// it'll be safe for MediaFormatReader to recreate decoders
RefPtr<VideoDecoderChild> ref = this;
GetManager()->RunWhenRecreated(NS_NewRunnableFunction([=]() {
if (ref->mInitialized && ref->mCallback) {
ref->mCallback->Error(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER);
if (ref->mInitialized) {
mDecodedData.Clear();
mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER,
__func__);
mDrainPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER,
__func__);
mFlushPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER,
__func__);
// Make sure the next request will be rejected accordingly if ever
// called.
mNeedNewDecoder = true;
} else {
ref->mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER, __func__);
ref->mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER,
__func__);
}
}));
}
if (mFlushTask) {
AutoCompleteTask complete(mFlushTask);
mFlushTask = nullptr;
}
mCanSend = false;
}
bool
VideoDecoderChild::InitIPDL(MediaDataDecoderCallback* aCallback,
const VideoInfo& aVideoInfo,
VideoDecoderChild::InitIPDL(const VideoInfo& aVideoInfo,
const layers::TextureFactoryIdentifier& aIdentifier)
{
RefPtr<VideoDecoderManagerChild> manager = VideoDecoderManagerChild::GetSingleton();
// If the manager isn't available, then don't initialize mIPDLSelfRef and leave
// us in an error state. We'll then immediately reject the promise when Init()
// is called and the caller can try again. Hopefully by then the new manager is
// ready, or we've notified the caller of it being no longer available.
// If not, then the cycle repeats until we're ready.
RefPtr<VideoDecoderManagerChild> manager =
VideoDecoderManagerChild::GetSingleton();
// If the manager isn't available, then don't initialize mIPDLSelfRef and
// leave us in an error state. We'll then immediately reject the promise when
// Init() is called and the caller can try again. Hopefully by then the new
// manager is ready, or we've notified the caller of it being no longer
// available. If not, then the cycle repeats until we're ready.
if (!manager || !manager->CanSend()) {
return true;
}
mIPDLSelfRef = this;
mCallback = aCallback;
bool success = false;
if (manager->SendPVideoDecoderConstructor(this, aVideoInfo, aIdentifier, &success)) {
if (manager->SendPVideoDecoderConstructor(this, aVideoInfo, aIdentifier,
&success)) {
mCanSend = true;
}
return success;
@ -197,12 +199,20 @@ VideoDecoderChild::Init()
return mInitPromise.Ensure(__func__);
}
void
VideoDecoderChild::Input(MediaRawData* aSample)
RefPtr<MediaDataDecoder::DecodePromise>
VideoDecoderChild::Decode(MediaRawData* aSample)
{
AssertOnManagerThread();
if (mNeedNewDecoder) {
return MediaDataDecoder::DecodePromise::CreateAndReject(
NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER, __func__);
}
if (!mCanSend) {
return;
// We're here if the IPC channel has died but we're still waiting for the
// RunWhenRecreated task to complete. The decode promise will be rejected
// when that task is run.
return mDecodePromise.Ensure(__func__);
}
// TODO: It would be nice to add an allocator method to
@ -210,8 +220,8 @@ VideoDecoderChild::Input(MediaRawData* aSample)
// into shmem rather than requiring a copy here.
Shmem buffer;
if (!AllocShmem(aSample->Size(), Shmem::SharedMemory::TYPE_BASIC, &buffer)) {
mCallback->Error(NS_ERROR_DOM_MEDIA_DECODE_ERR);
return;
return MediaDataDecoder::DecodePromise::CreateAndReject(
NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__);
}
memcpy(buffer.get<uint8_t>(), aSample->Data(), aSample->Size());
@ -224,27 +234,37 @@ VideoDecoderChild::Input(MediaRawData* aSample)
aSample->mKeyframe),
buffer);
SendInput(sample);
return mDecodePromise.Ensure(__func__);
}
void
VideoDecoderChild::Flush(SynchronousTask* aTask)
RefPtr<MediaDataDecoder::FlushPromise>
VideoDecoderChild::Flush()
{
AssertOnManagerThread();
mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
mDrainPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
if (mNeedNewDecoder) {
return MediaDataDecoder::FlushPromise::CreateAndReject(
NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER, __func__);
}
if (mCanSend) {
SendFlush();
mFlushTask = aTask;
} else {
AutoCompleteTask complete(aTask);
}
return mFlushPromise.Ensure(__func__);
}
void
RefPtr<MediaDataDecoder::DecodePromise>
VideoDecoderChild::Drain()
{
AssertOnManagerThread();
if (mNeedNewDecoder) {
return MediaDataDecoder::DecodePromise::CreateAndReject(
NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER, __func__);
}
if (mCanSend) {
SendDrain();
}
return mDrainPromise.Ensure(__func__);
}
void
@ -256,7 +276,6 @@ VideoDecoderChild::Shutdown()
SendShutdown();
}
mInitialized = false;
mCallback = nullptr;
}
bool

@ -6,15 +6,10 @@
#ifndef include_dom_ipc_VideoDecoderChild_h
#define include_dom_ipc_VideoDecoderChild_h
#include "mozilla/RefPtr.h"
#include "mozilla/dom/PVideoDecoderChild.h"
#include "MediaData.h"
#include "PlatformDecoderModule.h"
#include "mozilla/dom/PVideoDecoderChild.h"
namespace mozilla {
namespace layers {
class SynchronousTask;
}
namespace dom {
class RemoteVideoDecoder;
@ -40,16 +35,15 @@ public:
void ActorDestroy(ActorDestroyReason aWhy) override;
RefPtr<MediaDataDecoder::InitPromise> Init();
void Input(MediaRawData* aSample);
void Flush(layers::SynchronousTask* Task);
void Drain();
RefPtr<MediaDataDecoder::DecodePromise> Decode(MediaRawData* aSample);
RefPtr<MediaDataDecoder::DecodePromise> Drain();
RefPtr<MediaDataDecoder::FlushPromise> Flush();
void Shutdown();
bool IsHardwareAccelerated(nsACString& aFailureReason) const;
void SetSeekThreshold(const media::TimeUnit& aTime);
MOZ_IS_CLASS_INIT
bool InitIPDL(MediaDataDecoderCallback* aCallback,
const VideoInfo& aVideoInfo,
bool InitIPDL(const VideoInfo& aVideoInfo,
const layers::TextureFactoryIdentifier& aIdentifier);
void DestroyIPDL();
@ -66,16 +60,19 @@ private:
RefPtr<VideoDecoderChild> mIPDLSelfRef;
RefPtr<nsIThread> mThread;
MediaDataDecoderCallback* mCallback;
MozPromiseHolder<MediaDataDecoder::InitPromise> mInitPromise;
layers::SynchronousTask* mFlushTask;
MozPromiseHolder<MediaDataDecoder::DecodePromise> mDecodePromise;
MozPromiseHolder<MediaDataDecoder::DecodePromise> mDrainPromise;
MozPromiseHolder<MediaDataDecoder::FlushPromise> mFlushPromise;
nsCString mHardwareAcceleratedReason;
bool mCanSend;
bool mInitialized;
bool mIsHardwareAccelerated;
// Set to true if the actor got destroyed and we haven't yet notified the
// caller.
bool mNeedNewDecoder;
MediaDataDecoder::DecodedData mDecodedData;
};
} // namespace dom
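
VideoDecoderChild above keeps MozPromiseHolder members: each Decode/Drain/Flush call hands the caller a promise via Ensure(), and the matching Recv* IPC message later resolves or rejects it, with the *IfExists variants tolerating an already-settled holder. A minimal stand-in for that holder built on std::promise (an assumed simplification; the real holder is thread-aware and also supports rejection):

#include <future>
#include <iostream>

// Models MozPromiseHolder<T>: Ensure() exposes the consumer end,
// ResolveIfExists() settles it once and ignores later attempts.
template <typename T>
class PromiseHolderModel {
public:
  std::future<T> Ensure() { return mPromise.get_future(); }
  void ResolveIfExists(T aValue) {
    if (!mSettled) {
      mSettled = true;
      mPromise.set_value(std::move(aValue));
    }
  }
private:
  std::promise<T> mPromise;
  bool mSettled = false;
};

int main()
{
  PromiseHolderModel<bool> flushHolder;
  std::future<bool> pending = flushHolder.Ensure(); // returned to the caller
  flushHolder.ResolveIfExists(true);  // e.g. on RecvFlushComplete()
  flushHolder.ResolveIfExists(false); // ignored: already settled
  std::cout << "flush resolved: " << pending.get() << '\n';
}
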

@ -72,7 +72,6 @@ VideoDecoderParent::VideoDecoderParent(VideoDecoderManagerParent* aParent,
CreateDecoderParams params(aVideoInfo);
params.mTaskQueue = mDecodeTaskQueue;
params.mCallback = this;
params.mKnowsCompositor = mKnowsCompositor;
params.mImageContainer = new layers::ImageContainer();
@ -134,28 +133,73 @@ VideoDecoderParent::RecvInput(const MediaRawDataIPDL& aData)
DeallocShmem(aData.buffer());
mDecoder->Input(data);
RefPtr<VideoDecoderParent> self = this;
mDecoder->Decode(data)->Then(
mManagerTaskQueue, __func__,
[self, this](const MediaDataDecoder::DecodedData& aResults) {
if (mDestroyed) {
return;
}
ProcessDecodedData(aResults);
Unused << SendInputExhausted();
},
[self, this](const MediaResult& aError) { Error(aError); });
return IPC_OK();
}
void
VideoDecoderParent::ProcessDecodedData(
const MediaDataDecoder::DecodedData& aData)
{
MOZ_ASSERT(OnManagerThread());
for (const auto& data : aData) {
MOZ_ASSERT(data->mType == MediaData::VIDEO_DATA,
"Can only decode videos using VideoDecoderParent!");
VideoData* video = static_cast<VideoData*>(data.get());
MOZ_ASSERT(video->mImage, "Decoded video must output a layer::Image to "
"be used with VideoDecoderParent");
RefPtr<TextureClient> texture =
video->mImage->GetTextureClient(mKnowsCompositor);
if (!texture) {
texture = ImageClient::CreateTextureClientForImage(video->mImage,
mKnowsCompositor);
}
if (texture && !texture->IsAddedToCompositableClient()) {
texture->InitIPDLActor(mKnowsCompositor);
texture->SetAddedToCompositableClient();
}
VideoDataIPDL output(
MediaDataIPDL(data->mOffset, data->mTime, data->mTimecode,
data->mDuration, data->mFrames, data->mKeyframe),
video->mDisplay,
texture ? mParent->StoreImage(video->mImage, texture)
: SurfaceDescriptorGPUVideo(0),
video->mFrameID);
Unused << SendOutput(output);
}
}
mozilla::ipc::IPCResult
VideoDecoderParent::RecvFlush()
{
MOZ_ASSERT(!mDestroyed);
MOZ_ASSERT(OnManagerThread());
if (mDecoder) {
mDecoder->Flush();
}
// Dispatch a runnable to our own event queue so that
// it will be processed after anything that got dispatched
// during the Flush call.
RefPtr<VideoDecoderParent> self = this;
mManagerTaskQueue->Dispatch(NS_NewRunnableFunction([self]() {
if (!self->mDestroyed) {
Unused << self->SendFlushComplete();
}
}));
mDecoder->Flush()->Then(
mManagerTaskQueue, __func__,
[self, this]() {
if (!mDestroyed) {
Unused << SendFlushComplete();
}
},
[self, this](const MediaResult& aError) { Error(aError); });
return IPC_OK();
}
@ -164,7 +208,16 @@ VideoDecoderParent::RecvDrain()
{
MOZ_ASSERT(!mDestroyed);
MOZ_ASSERT(OnManagerThread());
mDecoder->Drain();
RefPtr<VideoDecoderParent> self = this;
mDecoder->Drain()->Then(
mManagerTaskQueue, __func__,
[self, this](const MediaDataDecoder::DecodedData& aResults) {
if (!mDestroyed) {
ProcessDecodedData(aResults);
Unused << SendDrainComplete();
}
},
[self, this](const MediaResult& aError) { Error(aError); });
return IPC_OK();
}
@ -203,91 +256,13 @@ VideoDecoderParent::ActorDestroy(ActorDestroyReason aWhy)
}
}
void
VideoDecoderParent::Output(MediaData* aData)
{
MOZ_ASSERT(mDecodeTaskQueue->IsCurrentThreadIn());
RefPtr<VideoDecoderParent> self = this;
RefPtr<KnowsCompositor> knowsCompositor = mKnowsCompositor;
RefPtr<MediaData> data = aData;
mManagerTaskQueue->Dispatch(NS_NewRunnableFunction([self, knowsCompositor, data]() {
if (self->mDestroyed) {
return;
}
MOZ_ASSERT(data->mType == MediaData::VIDEO_DATA, "Can only decode videos using VideoDecoderParent!");
VideoData* video = static_cast<VideoData*>(data.get());
MOZ_ASSERT(video->mImage, "Decoded video must output a layer::Image to be used with VideoDecoderParent");
RefPtr<TextureClient> texture = video->mImage->GetTextureClient(knowsCompositor);
if (!texture) {
texture = ImageClient::CreateTextureClientForImage(video->mImage, knowsCompositor);
}
if (texture && !texture->IsAddedToCompositableClient()) {
texture->InitIPDLActor(knowsCompositor);
texture->SetAddedToCompositableClient();
}
VideoDataIPDL output(MediaDataIPDL(data->mOffset,
data->mTime,
data->mTimecode,
data->mDuration,
data->mFrames,
data->mKeyframe),
video->mDisplay,
texture ? self->mParent->StoreImage(video->mImage, texture) : SurfaceDescriptorGPUVideo(0),
video->mFrameID);
Unused << self->SendOutput(output);
}));
}
void
VideoDecoderParent::Error(const MediaResult& aError)
{
MOZ_ASSERT(mDecodeTaskQueue->IsCurrentThreadIn());
RefPtr<VideoDecoderParent> self = this;
MediaResult error = aError;
mManagerTaskQueue->Dispatch(NS_NewRunnableFunction([self, error]() {
if (!self->mDestroyed) {
Unused << self->SendError(error);
}
}));
}
void
VideoDecoderParent::InputExhausted()
{
MOZ_ASSERT(mDecodeTaskQueue->IsCurrentThreadIn());
RefPtr<VideoDecoderParent> self = this;
mManagerTaskQueue->Dispatch(NS_NewRunnableFunction([self]() {
if (!self->mDestroyed) {
Unused << self->SendInputExhausted();
}
}));
}
void
VideoDecoderParent::DrainComplete()
{
MOZ_ASSERT(mDecodeTaskQueue->IsCurrentThreadIn());
RefPtr<VideoDecoderParent> self = this;
mManagerTaskQueue->Dispatch(NS_NewRunnableFunction([self]() {
if (!self->mDestroyed) {
Unused << self->SendDrainComplete();
}
}));
}
bool
VideoDecoderParent::OnReaderTaskQueue()
{
// Most of our calls into mDecoder come directly from IPDL so are on
// the right thread, but not actually on the task queue. We only ever
// run a single thread, not a pool, so this should work fine.
return OnManagerThread();
MOZ_ASSERT(OnManagerThread());
if (!mDestroyed) {
Unused << SendError(aError);
}
}
bool

@ -6,20 +6,20 @@
#ifndef include_dom_ipc_VideoDecoderParent_h
#define include_dom_ipc_VideoDecoderParent_h
#include "mozilla/RefPtr.h"
#include "ImageContainer.h"
#include "MediaData.h"
#include "PlatformDecoderModule.h"
#include "VideoDecoderManagerParent.h"
#include "mozilla/MozPromise.h"
#include "mozilla/dom/PVideoDecoderParent.h"
#include "mozilla/layers/TextureForwarder.h"
#include "VideoDecoderManagerParent.h"
#include "MediaData.h"
#include "ImageContainer.h"
namespace mozilla {
namespace dom {
class KnowsCompositorVideo;
class VideoDecoderParent final : public PVideoDecoderParent,
public MediaDataDecoderCallback
class VideoDecoderParent final : public PVideoDecoderParent
{
public:
// We refcount this class since the task queue can have runnables
@ -45,17 +45,12 @@ public:
void ActorDestroy(ActorDestroyReason aWhy) override;
// MediaDataDecoderCallback
void Output(MediaData* aData) override;
void Error(const MediaResult& aError) override;
void InputExhausted() override;
void DrainComplete() override;
bool OnReaderTaskQueue() override;
private:
bool OnManagerThread();
void Error(const MediaResult& aError);
~VideoDecoderParent();
void ProcessDecodedData(const MediaDataDecoder::DecodedData& aData);
RefPtr<VideoDecoderManagerParent> mParent;
RefPtr<VideoDecoderParent> mIPDLSelfRef;

@ -71,8 +71,10 @@ DecodedAudioDataSink::DecodedAudioDataSink(AbstractThread* aThread,
bool monoAudioEnabled = MediaPrefs::MonoAudio();
mOutputChannels = monoAudioEnabled
? 1 : (MediaPrefs::AudioSinkForceStereo() ? 2 : mInfo.mChannels);
mOutputChannels =
monoAudioEnabled
? 1
: (MediaPrefs::AudioSinkForceStereo() ? 2 : mInfo.mChannels);
}
DecodedAudioDataSink::~DecodedAudioDataSink()
@ -195,7 +197,15 @@ nsresult
DecodedAudioDataSink::InitializeAudioStream(const PlaybackParams& aParams)
{
mAudioStream = new AudioStream(*this);
nsresult rv = mAudioStream->Init(mOutputChannels, mOutputRate, mChannel);
// When AudioQueue is empty, there is no way to know the channel layout of
// the incoming audio data, so we use the predefined channel map instead.
uint32_t channelMap = mConverter
? mConverter->OutputConfig().Layout().Map()
: AudioStream::GetPreferredChannelMap(mOutputChannels);
// The layout map used here is already processed by mConverter with
// mOutputChannels into SMPTE format, so there is no need to worry about
// whether MediaPrefs::MonoAudio() or MediaPrefs::AudioSinkForceStereo() is
// applied.
nsresult rv = mAudioStream->Init(mOutputChannels, channelMap, mOutputRate, mChannel);
if (NS_FAILED(rv)) {
mAudioStream->Shutdown();
mAudioStream = nullptr;

@ -310,7 +310,8 @@ public:
return NS_ERROR_NOT_AVAILABLE;
}
uint64_t frameDuration = (completeIdx + 1u < mapping.Length())
uint64_t frameDuration =
(completeIdx + 1u < mapping.Length())
? mapping[completeIdx + 1].mTimecode - mapping[completeIdx].mTimecode
: mapping[completeIdx].mTimecode - previousMapping.ref().mTimecode;
aStart = mapping[0].mTimecode / NS_PER_USEC;

@ -284,8 +284,8 @@ MediaSourceDecoder::NextFrameBufferedStatus()
currentPosition
+ media::TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
return buffered.ContainsStrict(ClampIntervalToEnd(interval))
? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
: MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
: MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
}
bool

@ -291,10 +291,9 @@ MediaSourceTrackDemuxer::MediaSourceTrackDemuxer(MediaSourceDemuxer* aParent,
, mType(aType)
, mMonitor("MediaSourceTrackDemuxer")
, mReset(true)
, mPreRoll(
TimeUnit::FromMicroseconds(
OpusDataDecoder::IsOpus(mParent->GetTrackInfo(mType)->mMimeType)
? 80000 : 0))
, mPreRoll(TimeUnit::FromMicroseconds(
OpusDataDecoder::IsOpus(mParent->GetTrackInfo(mType)->mMimeType) ? 80000
: 0))
{
}

@ -1319,13 +1319,14 @@ TrackBuffersManager::CompleteCodedFrameProcessing()
// 6. Remove the media segment bytes from the beginning of the input buffer.
// Clear our demuxer from any already processed data.
int64_t safeToEvict = std::min(
HasVideo()
? mVideoTracks.mDemuxer->GetEvictionOffset(mVideoTracks.mLastParsedEndTime)
: INT64_MAX,
HasAudio()
? mAudioTracks.mDemuxer->GetEvictionOffset(mAudioTracks.mLastParsedEndTime)
: INT64_MAX);
int64_t safeToEvict = std::min(HasVideo()
? mVideoTracks.mDemuxer->GetEvictionOffset(
mVideoTracks.mLastParsedEndTime)
: INT64_MAX,
HasAudio()
? mAudioTracks.mDemuxer->GetEvictionOffset(
mAudioTracks.mLastParsedEndTime)
: INT64_MAX);
ErrorResult rv;
mCurrentInputBuffer->EvictBefore(safeToEvict, rv);
if (rv.Failed()) {
@ -1398,8 +1399,10 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
// Let presentation timestamp equal 0.
// Otherwise
// Let presentation timestamp be a double precision floating point representation of the coded frame's presentation timestamp in seconds.
TimeUnit presentationTimestamp = mSourceBufferAttributes->mGenerateTimestamps
? TimeUnit() : TimeUnit::FromMicroseconds(aSamples[0]->mTime);
TimeUnit presentationTimestamp =
mSourceBufferAttributes->mGenerateTimestamps
? TimeUnit()
: TimeUnit::FromMicroseconds(aSamples[0]->mTime);
// 3. If mode equals "sequence" and group start timestamp is set, then run the following steps:
CheckSequenceDiscontinuity(presentationTimestamp);
@ -1412,12 +1415,13 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
// of +- mLongestFrameDuration on the append window start.
// We only apply the leeway with the default append window start of 0
// otherwise do as per spec.
TimeInterval targetWindow = mAppendWindow.mStart != TimeUnit::FromSeconds(0)
TimeInterval targetWindow =
mAppendWindow.mStart != TimeUnit::FromSeconds(0)
? mAppendWindow
: TimeInterval(mAppendWindow.mStart, mAppendWindow.mEnd,
trackBuffer.mLastFrameDuration.isSome()
? trackBuffer.mLongestFrameDuration
: TimeUnit::FromMicroseconds(aSamples[0]->mDuration));
? trackBuffer.mLongestFrameDuration
: TimeUnit::FromMicroseconds(aSamples[0]->mDuration));
TimeIntervals samplesRange;
uint32_t sizeNewSamples = 0;
@ -1484,13 +1488,12 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
TimeInterval sampleInterval =
mSourceBufferAttributes->mGenerateTimestamps
? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
: TimeInterval(timestampOffset + sampleTime,
timestampOffset + sampleTime + sampleDuration);
TimeUnit decodeTimestamp =
mSourceBufferAttributes->mGenerateTimestamps
? timestampOffset
: timestampOffset + sampleTimecode;
? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
: TimeInterval(timestampOffset + sampleTime,
timestampOffset + sampleTime + sampleDuration);
TimeUnit decodeTimestamp = mSourceBufferAttributes->mGenerateTimestamps
? timestampOffset
: timestampOffset + sampleTimecode;
// 6. If last decode timestamp for track buffer is set and decode timestamp is less than last decode timestamp:
// OR
@ -1525,8 +1528,8 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
// Rather that restarting the process for the frame, we run the first
// steps again instead.
// 3. If mode equals "sequence" and group start timestamp is set, then run the following steps:
TimeUnit presentationTimestamp = mSourceBufferAttributes->mGenerateTimestamps
? TimeUnit() : sampleTime;
TimeUnit presentationTimestamp =
mSourceBufferAttributes->mGenerateTimestamps ? TimeUnit() : sampleTime;
CheckSequenceDiscontinuity(presentationTimestamp);
if (!sample->mKeyframe) {
@ -1538,13 +1541,12 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
timestampOffset = mSourceBufferAttributes->GetTimestampOffset();
sampleInterval =
mSourceBufferAttributes->mGenerateTimestamps
? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
: TimeInterval(timestampOffset + sampleTime,
timestampOffset + sampleTime + sampleDuration);
decodeTimestamp =
mSourceBufferAttributes->mGenerateTimestamps
? timestampOffset
: timestampOffset + sampleTimecode;
? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
: TimeInterval(timestampOffset + sampleTime,
timestampOffset + sampleTime + sampleDuration);
decodeTimestamp = mSourceBufferAttributes->mGenerateTimestamps
? timestampOffset
: timestampOffset + sampleTimecode;
}
trackBuffer.mNeedRandomAccessPoint = false;
needDiscontinuityCheck = false;

@ -363,8 +363,8 @@ TheoraState::Init()
int64_t n = mTheoraInfo.aspect_numerator;
int64_t d = mTheoraInfo.aspect_denominator;
float aspectRatio = (n == 0 || d == 0)
? 1.0f : static_cast<float>(n) / static_cast<float>(d);
float aspectRatio =
(n == 0 || d == 0) ? 1.0f : static_cast<float>(n) / static_cast<float>(d);
// Ensure the frame and picture regions aren't larger than our prescribed
// maximum, or zero sized.

@ -35,7 +35,6 @@
#include "MediaInfo.h"
#include "MediaPrefs.h"
#include "FuzzingWrapper.h"
#include "H264Converter.h"
#include "AgnosticDecoderModule.h"
@ -287,21 +286,8 @@ PDMFactory::CreateDecoderWithPDM(PlatformDecoderModule* aPDM,
return nullptr;
}
MediaDataDecoderCallback* callback = aParams.mCallback;
RefPtr<DecoderCallbackFuzzingWrapper> callbackWrapper;
if (MediaPrefs::PDMFuzzingEnabled()) {
callbackWrapper = new DecoderCallbackFuzzingWrapper(callback);
callbackWrapper->SetVideoOutputMinimumInterval(
TimeDuration::FromMilliseconds(MediaPrefs::PDMFuzzingInterval()));
callbackWrapper->SetDontDelayInputExhausted(!MediaPrefs::PDMFuzzingDelayInputExhausted());
callback = callbackWrapper.get();
}
CreateDecoderParams params = aParams;
params.mCallback = callback;
if (MP4Decoder::IsH264(config.mMimeType) && !aParams.mUseBlankDecoder) {
RefPtr<H264Converter> h = new H264Converter(aPDM, params);
RefPtr<H264Converter> h = new H264Converter(aPDM, aParams);
const nsresult rv = h->GetLastError();
if (NS_SUCCEEDED(rv) || rv == NS_ERROR_NOT_INITIALIZED) {
// The H264Converter either successfully created the wrapped decoder,
@@ -310,11 +296,7 @@ PDMFactory::CreateDecoderWithPDM(PlatformDecoderModule* aPDM,
m = h.forget();
}
} else {
m = aPDM->CreateVideoDecoder(params);
}
if (callbackWrapper && m) {
m = new DecoderFuzzingWrapper(m.forget(), callbackWrapper.forget());
m = aPDM->CreateVideoDecoder(aParams);
}
return m.forget();

View file

@@ -34,16 +34,17 @@ class RemoteDecoderModule;
}
class MediaDataDecoder;
class MediaDataDecoderCallback;
class TaskQueue;
class CDMProxy;
static LazyLogModule sPDMLog("PlatformDecoderModule");
struct MOZ_STACK_CLASS CreateDecoderParams final {
struct MOZ_STACK_CLASS CreateDecoderParams final
{
explicit CreateDecoderParams(const TrackInfo& aConfig)
: mConfig(aConfig)
{}
{
}
template <typename T1, typename... Ts>
CreateDecoderParams(const TrackInfo& aConfig, T1&& a1, Ts&&... args)
@@ -74,23 +75,40 @@ struct MOZ_STACK_CLASS CreateDecoderParams final {
const TrackInfo& mConfig;
TaskQueue* mTaskQueue = nullptr;
MediaDataDecoderCallback* mCallback = nullptr;
DecoderDoctorDiagnostics* mDiagnostics = nullptr;
layers::ImageContainer* mImageContainer = nullptr;
MediaResult* mError = nullptr;
RefPtr<layers::KnowsCompositor> mKnowsCompositor;
RefPtr<GMPCrashHelper> mCrashHelper;
bool mUseBlankDecoder = false;
TrackInfo::TrackType mType = TrackInfo::kUndefinedTrack;
MediaEventProducer<TrackInfo::TrackType>* mOnWaitingForKeyEvent = nullptr;
private:
void Set(TaskQueue* aTaskQueue) { mTaskQueue = aTaskQueue; }
void Set(MediaDataDecoderCallback* aCallback) { mCallback = aCallback; }
void Set(DecoderDoctorDiagnostics* aDiagnostics) { mDiagnostics = aDiagnostics; }
void Set(layers::ImageContainer* aImageContainer) { mImageContainer = aImageContainer; }
void Set(DecoderDoctorDiagnostics* aDiagnostics)
{
mDiagnostics = aDiagnostics;
}
void Set(layers::ImageContainer* aImageContainer)
{
mImageContainer = aImageContainer;
}
void Set(MediaResult* aError) { mError = aError; }
void Set(GMPCrashHelper* aCrashHelper) { mCrashHelper = aCrashHelper; }
void Set(bool aUseBlankDecoder) { mUseBlankDecoder = aUseBlankDecoder; }
void Set(layers::KnowsCompositor* aKnowsCompositor) { mKnowsCompositor = aKnowsCompositor; }
void Set(layers::KnowsCompositor* aKnowsCompositor)
{
mKnowsCompositor = aKnowsCompositor;
}
void Set(TrackInfo::TrackType aType)
{
mType = aType;
}
void Set(MediaEventProducer<TrackInfo::TrackType>* aOnWaitingForKey)
{
mOnWaitingForKeyEvent = aOnWaitingForKey;
}
template <typename T1, typename T2, typename... Ts>
void Set(T1&& a1, T2&& a2, Ts&&... args)
{
@@ -113,7 +131,8 @@ private:
// output samples exists for testing, and is created when the pref
// "media.use-blank-decoder" is true.
class PlatformDecoderModule {
class PlatformDecoderModule
{
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PlatformDecoderModule)
@@ -132,7 +151,8 @@ public:
return SupportsMimeType(aTrackInfo.mMimeType, aDiagnostics);
}
enum class ConversionRequired : uint8_t {
enum class ConversionRequired : uint8_t
{
kNeedNone,
kNeedAVCC,
kNeedAnnexB,
@@ -157,7 +177,6 @@ protected:
// Asynchronous decoding of video should be done in runnables dispatched
// to aVideoTaskQueue. If the task queue isn't needed, the decoder should
// not hold a reference to it.
// Output and errors should be returned to the reader via aCallback.
// On Windows the task queue's threads have MSCOM initialized with
// COINIT_MULTITHREADED.
// Returns nullptr if the decoder can't be created.
@@ -170,7 +189,6 @@ protected:
// Asynchronous decoding of audio should be done in runnables dispatched to
// aAudioTaskQueue. If the task queue isn't needed, the decoder should
// not hold a reference to it.
// Output and errors should be returned to the reader via aCallback.
// Returns nullptr if the decoder can't be created.
// On Windows the task queue's threads have MSCOM initialized with
// COINIT_MULTITHREADED.
@@ -180,40 +198,6 @@ protected:
CreateAudioDecoder(const CreateDecoderParams& aParams) = 0;
};
// A callback used by MediaDataDecoder to return output/errors to the
// MediaFormatReader.
// Implementation is threadsafe, and can be called on any thread.
class MediaDataDecoderCallback {
public:
virtual ~MediaDataDecoderCallback() {}
// Called by MediaDataDecoder when a sample has been decoded.
virtual void Output(MediaData* aData) = 0;
// Denotes an error in the decoding process. The reader will stop calling
// the decoder.
virtual void Error(const MediaResult& aError) = 0;
// Denotes that the last input sample has been inserted into the decoder,
// and no more output can be produced unless more input is sent.
// A frame decoding session is completed once InputExhausted has been called.
// MediaDataDecoder::Input will not be called again until InputExhausted has
// been called.
virtual void InputExhausted() = 0;
virtual void DrainComplete() = 0;
virtual void ReleaseMediaResources() {}
virtual bool OnReaderTaskQueue() = 0;
// Denotes that a pending encryption key is preventing more input being fed
// into the decoder. This only needs to be overridden for callbacks that
// handle encryption. E.g. benchmarking does not use eme, so this need
// not be overridden in that case.
virtual void WaitingForKey() {}
};
// MediaDataDecoder is the interface exposed by decoders created by the
// PlatformDecoderModule's Create*Decoder() functions. The type of
// media data that the decoder accepts as valid input and produces as
@@ -231,17 +215,19 @@ public:
// TaskQueue passed into the PlatformDecoderModules's Create*Decoder()
// function. This may not be necessary for platforms with async APIs
// for decoding.
//
// If an error occurs at any point after the Init promise has been
// completed, then Error() must be called on the associated
// MediaDataDecoderCallback.
class MediaDataDecoder {
class MediaDataDecoder
{
protected:
virtual ~MediaDataDecoder() {};
public:
typedef TrackInfo::TrackType TrackType;
typedef MozPromise<TrackType, MediaResult, /* IsExclusive = */ true> InitPromise;
typedef nsTArray<RefPtr<MediaData>> DecodedData;
typedef MozPromise<TrackType, MediaResult, /* IsExclusive = */ true>
InitPromise;
typedef MozPromise<DecodedData, MediaResult, /* IsExclusive = */ true>
DecodePromise;
typedef MozPromise<bool, MediaResult, /* IsExclusive = */ true> FlushPromise;
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDataDecoder)
@@ -254,41 +240,39 @@ public:
// be done here so that it can be canceled by calling Shutdown()!
virtual RefPtr<InitPromise> Init() = 0;
// Inserts a sample into the decoder's decode pipeline.
virtual void Input(MediaRawData* aSample) = 0;
// Causes all samples in the decoding pipeline to be discarded. When
// this function returns, the decoder must be ready to accept new input
// for decoding. This function is called when the demuxer seeks, before
// decoding resumes after the seek.
// While the reader calls Flush(), it ignores all output sent to it;
// it is safe (but pointless) to send output while Flush is called.
// The MediaFormatReader will not call Input() while it's calling Flush().
virtual void Flush() = 0;
// Inserts a sample into the decoder's decode pipeline. The DecodePromise will
// be resolved with the decoded MediaData. In case the decoder needs more
// input, the DecodePromise may be resolved with an empty array of samples to
// indicate that Decode should be called again before a MediaData is returned.
virtual RefPtr<DecodePromise> Decode(MediaRawData* aSample) = 0;
// Causes all complete samples in the pipeline that can be decoded to be
// output. If the decoder can't produce samples from the current output,
// it drops the input samples. The decoder may be holding onto samples
// that are required to decode samples that it expects to get in future.
// This is called when the demuxer reaches end of stream.
// The MediaFormatReader will not call Input() while it's calling Drain().
// This function is asynchronous. The MediaDataDecoder must call
// MediaDataDecoderCallback::DrainComplete() once all remaining
// samples have been output.
virtual void Drain() = 0;
// This function is asynchronous. The MediaDataDecoder shall resolve the
// pending DecodePromise with all drained samples.
virtual RefPtr<DecodePromise> Drain() = 0;
// Cancels all init/input/drain operations, and shuts down the
// decoder. The platform decoder should clean up any resources it's using
// and release memory etc. Shutdown() must block until the decoder has
// completed shutdown. The reader calls Flush() before calling Shutdown().
// The reader will delete the decoder once Shutdown() returns.
// The MediaDataDecoderCallback *must* not be called after Shutdown() has
// returned.
virtual void Shutdown() = 0;
// Causes all samples in the decoding pipeline to be discarded. When this
// promise resolves, the decoder must be ready to accept new data for
// decoding. This function is called when the demuxer seeks, before decoding
// resumes after the seek. The current DecodePromise, if any, shall be rejected
// with NS_ERROR_DOM_MEDIA_CANCELED.
virtual RefPtr<FlushPromise> Flush() = 0;
// Called from the state machine task queue or main thread.
// Decoder needs to decide whether or not hardware acceleration is supported
// after creating. It doesn't need to call Init() before calling this function.
// Cancels all init/decode/drain operations, and shuts down the decoder. The
// platform decoder should clean up any resources it's using and release
// memory etc. The shutdown promise will be resolved once the decoder has
// completed shutdown. The reader calls Flush() before calling Shutdown(). The
// reader will delete the decoder once the promise is resolved.
// The ShutdownPromise must only ever be resolved.
virtual RefPtr<ShutdownPromise> Shutdown() = 0;
// Called from the state machine task queue or main thread. Decoder needs to
// decide whether or not hardware acceleration is supported after creating.
// It doesn't need to call Init() before calling this function.
virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const { return false; }
// Return the name of the MediaDataDecoder, only used for decoding.
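
For orientation, a hypothetical caller of the new promise-based API might look like the sketch below; MyClient, OnDecoded, OnError and mTaskQueue are illustrative names, not part of this patch.

void
MyClient::DecodeOne(MediaDataDecoder* aDecoder, MediaRawData* aSample)
{
  RefPtr<MyClient> self = this;
  aDecoder->Decode(aSample)->Then(
    mTaskQueue, __func__,
    [self](const MediaDataDecoder::DecodedData& aResults) {
      // An empty array means the decoder wants more input before it can
      // produce output; otherwise consume the decoded samples in order.
      for (const auto& data : aResults) {
        self->OnDecoded(data);
      }
    },
    [self](const MediaResult& aError) { self->OnError(aError); });
}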

View file

@@ -32,7 +32,6 @@ public:
BlankMediaDataDecoder(BlankMediaDataCreator* aCreator,
const CreateDecoderParams& aParams)
: mCreator(aCreator)
, mCallback(aParams.mCallback)
, mMaxRefFrames(aParams.mConfig.GetType() == TrackInfo::kVideoTrack &&
MP4Decoder::IsH264(aParams.mConfig.mMimeType)
? mp4_demuxer::AnnexB::HasSPS(aParams.VideoConfig().mExtraData)
@@ -47,30 +46,45 @@ public:
return InitPromise::CreateAndResolve(mType, __func__);
}
void Shutdown() override {}
RefPtr<ShutdownPromise> Shutdown() override
{
return ShutdownPromise::CreateAndResolve(true, __func__);
}
void Input(MediaRawData* aSample) override
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override
{
RefPtr<MediaData> data =
mCreator->Create(media::TimeUnit::FromMicroseconds(aSample->mTime),
media::TimeUnit::FromMicroseconds(aSample->mDuration),
aSample->mOffset);
OutputFrame(data);
}
void Flush() override
{
mReorderQueue.Clear();
}
void Drain() override
{
while (!mReorderQueue.IsEmpty()) {
mCallback->Output(mReorderQueue.Pop().get());
if (!data) {
return DecodePromise::CreateAndReject(NS_ERROR_OUT_OF_MEMORY, __func__);
}
mCallback->DrainComplete();
// Frames come out in DTS order but we need to output them in PTS order.
mReorderQueue.Push(data);
if (mReorderQueue.Length() > mMaxRefFrames) {
return DecodePromise::CreateAndResolve(
DecodedData{ mReorderQueue.Pop().get() }, __func__);
}
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
}
RefPtr<DecodePromise> Drain() override
{
DecodedData samples;
while (!mReorderQueue.IsEmpty()) {
samples.AppendElement(mReorderQueue.Pop().get());
}
return DecodePromise::CreateAndResolve(samples, __func__);
}
RefPtr<FlushPromise> Flush() override
{
mReorderQueue.Clear();
return FlushPromise::CreateAndResolve(true, __func__);
}
const char* GetDescriptionName() const override
@@ -78,26 +92,8 @@ public:
return "blank media data decoder";
}
private:
void OutputFrame(MediaData* aData)
{
if (!aData) {
mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
return;
}
// Frames come out in DTS order but we need to output them in PTS order.
mReorderQueue.Push(aData);
while (mReorderQueue.Length() > mMaxRefFrames) {
mCallback->Output(mReorderQueue.Pop().get());
}
mCallback->InputExhausted();
}
private:
nsAutoPtr<BlankMediaDataCreator> mCreator;
MediaDataDecoderCallback* mCallback;
const uint32_t mMaxRefFrames;
ReorderQueue mReorderQueue;
TrackInfo::TrackType mType;
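
The blank decoder above doubles as a compact illustration of the DTS-to-PTS reordering contract: frames are buffered until more than mMaxRefFrames are queued, then emitted in presentation order, and Drain() empties the queue. A rough standalone sketch of the same idea, using std::priority_queue in place of Gecko's ReorderQueue; Frame and Reorderer are illustrative types.

#include <cstddef>
#include <cstdint>
#include <queue>
#include <vector>

struct Frame
{
  int64_t mPts;
};

struct ByPts
{
  bool operator()(const Frame& a, const Frame& b) const
  {
    return a.mPts > b.mPts; // smallest PTS ends up on top
  }
};

class Reorderer
{
public:
  explicit Reorderer(size_t aMaxRefFrames) : mMaxRefFrames(aMaxRefFrames) {}

  // Frames arrive in decode order; emit the earliest-PTS frame only once
  // enough frames are buffered that no earlier frame can still arrive.
  bool Push(Frame aFrame, Frame& aOut)
  {
    mQueue.push(aFrame);
    if (mQueue.size() > mMaxRefFrames) {
      aOut = mQueue.top();
      mQueue.pop();
      return true;
    }
    return false;
  }

  // Drain: whatever is left comes out in PTS order.
  std::vector<Frame> Drain()
  {
    std::vector<Frame> out;
    while (!mQueue.empty()) {
      out.push_back(mQueue.top());
      mQueue.pop();
    }
    return out;
  }

private:
  std::priority_queue<Frame, std::vector<Frame>, ByPts> mQueue;
  const size_t mMaxRefFrames;
};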

View file

@@ -28,13 +28,11 @@ namespace mozilla {
OpusDataDecoder::OpusDataDecoder(const CreateDecoderParams& aParams)
: mInfo(aParams.AudioConfig())
, mTaskQueue(aParams.mTaskQueue)
, mCallback(aParams.mCallback)
, mOpusDecoder(nullptr)
, mSkip(0)
, mDecodedHeader(false)
, mPaddingDiscarded(false)
, mFrames(0)
, mIsFlushing(false)
{
}
@@ -46,9 +44,13 @@ OpusDataDecoder::~OpusDataDecoder()
}
}
void
RefPtr<ShutdownPromise>
OpusDataDecoder::Shutdown()
{
RefPtr<OpusDataDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self]() {
return ShutdownPromise::CreateAndResolve(true, __func__);
});
}
void
@@ -142,30 +144,15 @@ OpusDataDecoder::DecodeHeader(const unsigned char* aData, size_t aLength)
return NS_OK;
}
void
OpusDataDecoder::Input(MediaRawData* aSample)
RefPtr<MediaDataDecoder::DecodePromise>
OpusDataDecoder::Decode(MediaRawData* aSample)
{
mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
this, &OpusDataDecoder::ProcessDecode, aSample));
return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
&OpusDataDecoder::ProcessDecode, aSample);
}
void
RefPtr<MediaDataDecoder::DecodePromise>
OpusDataDecoder::ProcessDecode(MediaRawData* aSample)
{
if (mIsFlushing) {
return;
}
MediaResult rv = DoDecode(aSample);
if (NS_FAILED(rv)) {
mCallback->Error(rv);
return;
}
mCallback->InputExhausted();
}
MediaResult
OpusDataDecoder::DoDecode(MediaRawData* aSample)
{
uint32_t channels = mOpusParser->mChannels;
@@ -173,9 +160,10 @@ OpusDataDecoder::DoDecode(MediaRawData* aSample)
// Discard padding should be used only on the final packet, so
// decoding after a padding discard is invalid.
OPUS_DEBUG("Opus error, discard padding on interstitial packet");
return MediaResult(
NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("Discard padding on interstitial packet"));
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("Discard padding on interstitial packet")),
__func__);
}
if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
@@ -188,28 +176,32 @@ OpusDataDecoder::DoDecode(MediaRawData* aSample)
uint32_t frames_number = opus_packet_get_nb_frames(aSample->Data(),
aSample->Size());
if (frames_number <= 0) {
OPUS_DEBUG("Invalid packet header: r=%ld length=%ld",
frames_number, aSample->Size());
return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("Invalid packet header: r=%d length=%u",
frames_number, uint32_t(aSample->Size())));
OPUS_DEBUG("Invalid packet header: r=%ld length=%ld", frames_number,
aSample->Size());
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("Invalid packet header: r=%d length=%u",
frames_number, uint32_t(aSample->Size()))),
__func__);
}
uint32_t samples = opus_packet_get_samples_per_frame(
aSample->Data(), opus_int32(mOpusParser->mRate));
// A valid Opus packet must be between 2.5 and 120 ms long (48kHz).
uint32_t frames = frames_number*samples;
if (frames < 120 || frames > 5760) {
OPUS_DEBUG("Invalid packet frames: %u", frames);
return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("Invalid packet frames:%u", frames));
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("Invalid packet frames:%u", frames)),
__func__);
}
AlignedAudioBuffer buffer(frames * channels);
if (!buffer) {
return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
}
// Decode to the appropriate sample type.
@@ -223,8 +215,10 @@ OpusDataDecoder::DoDecode(MediaRawData* aSample)
buffer.get(), frames, false);
#endif
if (ret < 0) {
return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("Opus decoding error:%d", ret));
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("Opus decoding error:%d", ret)),
__func__);
}
NS_ASSERTION(uint32_t(ret) == frames, "Opus decoded too few audio samples");
CheckedInt64 startTime = aSample->mTime;
@@ -249,10 +243,12 @@ OpusDataDecoder::DoDecode(MediaRawData* aSample)
// Record the discard so we can return an error if another packet is
// decoded.
if (aSample->mDiscardPadding > frames) {
// Discarding more than the entire packet is invalid.
// Discarding more than the entire packet is invalid.
OPUS_DEBUG("Opus error, discard padding larger than packet");
return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("Discard padding larger than packet"));
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("Discard padding larger than packet")),
__func__);
}
mPaddingDiscarded = true;
@@ -281,59 +277,59 @@ OpusDataDecoder::DoDecode(MediaRawData* aSample)
CheckedInt64 duration = FramesToUsecs(frames, mOpusParser->mRate);
if (!duration.isValid()) {
return MediaResult(
NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow converting WebM audio duration"));
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow converting WebM audio duration")),
__func__);
}
CheckedInt64 time =
startTime - FramesToUsecs(mOpusParser->mPreSkip, mOpusParser->mRate) +
FramesToUsecs(mFrames, mOpusParser->mRate);
CheckedInt64 time = startTime -
FramesToUsecs(mOpusParser->mPreSkip, mOpusParser->mRate) +
FramesToUsecs(mFrames, mOpusParser->mRate);
if (!time.isValid()) {
return MediaResult(
NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow shifting tstamp by codec delay"));
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow shifting tstamp by codec delay")),
__func__);
};
mCallback->Output(new AudioData(aSample->mOffset,
time.value(),
duration.value(),
frames,
Move(buffer),
mOpusParser->mChannels,
mOpusParser->mRate));
mFrames += frames;
return NS_OK;
return DecodePromise::CreateAndResolve(
DecodedData{ new AudioData(aSample->mOffset, time.value(), duration.value(),
frames, Move(buffer), mOpusParser->mChannels,
mOpusParser->mRate) },
__func__);
}
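
The timestamp math above leans on CheckedInt64 so that overflow is detected rather than silently wrapping. A condensed sketch of the pattern, assuming mozilla::CheckedInt semantics; FramesToUsecsSketch and ComputeOpusTime stand in for the real helpers.

#include <cstdint>
#include "mozilla/CheckedInt.h"

using mozilla::CheckedInt64;

// (frames * 1e6) / rate, flagged invalid on multiply overflow.
static CheckedInt64
FramesToUsecsSketch(int64_t aFrames, int64_t aRate)
{
  return (CheckedInt64(aFrames) * 1000000) / aRate;
}

// Mirrors the pre-skip handling above: subtract the codec delay, add the
// frames decoded so far, and fail if any step overflows.
static bool
ComputeOpusTime(int64_t aStartTime, int64_t aPreSkip, int64_t aFramesSoFar,
                int64_t aRate, int64_t& aOutUsecs)
{
  CheckedInt64 t = CheckedInt64(aStartTime)
                   - FramesToUsecsSketch(aPreSkip, aRate)
                   + FramesToUsecsSketch(aFramesSoFar, aRate);
  if (!t.isValid()) {
    return false; // caller rejects the DecodePromise with an overflow error
  }
  aOutUsecs = t.value();
  return true;
}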
void
OpusDataDecoder::ProcessDrain()
{
mCallback->DrainComplete();
}
void
RefPtr<MediaDataDecoder::DecodePromise>
OpusDataDecoder::Drain()
{
mTaskQueue->Dispatch(NewRunnableMethod(this, &OpusDataDecoder::ProcessDrain));
RefPtr<OpusDataDecoder> self = this;
// InvokeAsync dispatches a task that will be run after any pending decode
// completes. As such, once the drain task runs, there's nothing more to do.
return InvokeAsync(mTaskQueue, __func__, [] {
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
});
}
void
RefPtr<MediaDataDecoder::FlushPromise>
OpusDataDecoder::Flush()
{
if (!mOpusDecoder) {
return;
return FlushPromise::CreateAndResolve(true, __func__);
}
mIsFlushing = true;
nsCOMPtr<nsIRunnable> runnable = NS_NewRunnableFunction([this] () {
RefPtr<OpusDataDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
MOZ_ASSERT(mOpusDecoder);
// Reset the decoder.
opus_multistream_decoder_ctl(mOpusDecoder, OPUS_RESET_STATE);
mSkip = mOpusParser->mPreSkip;
mPaddingDiscarded = false;
mLastFrameTime.reset();
return FlushPromise::CreateAndResolve(true, __func__);
});
SyncRunnable::DispatchToThread(mTaskQueue, runnable);
mIsFlushing = false;
}
/* static */
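
Decode, Drain, Flush and Shutdown above all use the same InvokeAsync idiom: the lambda runs on the task queue after any previously dispatched work (so a drain task naturally runs after pending decodes), and the promise it returns is chained to the one handed back to the caller. A minimal sketch; DrainSketch is an illustrative free function, not part of the patch.

RefPtr<MediaDataDecoder::DecodePromise>
DrainSketch(TaskQueue* aTaskQueue)
{
  return InvokeAsync(aTaskQueue, __func__, [] {
    // Nothing is buffered in this hypothetical decoder, so resolve the
    // drain immediately with an empty set of samples.
    return MediaDataDecoder::DecodePromise::CreateAndResolve(
      MediaDataDecoder::DecodedData(), __func__);
  });
}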

View file

@@ -24,10 +24,10 @@ public:
~OpusDataDecoder();
RefPtr<InitPromise> Init() override;
void Input(MediaRawData* aSample) override;
void Flush() override;
void Drain() override;
void Shutdown() override;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
RefPtr<DecodePromise> Drain() override;
RefPtr<FlushPromise> Flush() override;
RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "opus audio decoder";
@@ -46,13 +46,10 @@ public:
private:
nsresult DecodeHeader(const unsigned char* aData, size_t aLength);
void ProcessDecode(MediaRawData* aSample);
MediaResult DoDecode(MediaRawData* aSample);
void ProcessDrain();
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
const AudioInfo& mInfo;
const RefPtr<TaskQueue> mTaskQueue;
MediaDataDecoderCallback* mCallback;
// Opus decoder state
nsAutoPtr<OpusParser> mOpusParser;
@@ -68,8 +65,6 @@ private:
int64_t mFrames;
Maybe<int64_t> mLastFrameTime;
uint8_t mMappingTable[MAX_AUDIO_CHANNELS]; // Channel mapping table.
Atomic<bool> mIsFlushing;
};
} // namespace mozilla

View file

@@ -40,8 +40,6 @@ ogg_packet InitTheoraPacket(const unsigned char* aData, size_t aLength,
TheoraDecoder::TheoraDecoder(const CreateDecoderParams& aParams)
: mImageContainer(aParams.mImageContainer)
, mTaskQueue(aParams.mTaskQueue)
, mCallback(aParams.mCallback)
, mIsFlushing(false)
, mTheoraSetupInfo(nullptr)
, mTheoraDecoderContext(nullptr)
, mPacketCount(0)
@@ -58,13 +56,17 @@ TheoraDecoder::~TheoraDecoder()
th_info_clear(&mTheoraInfo);
}
void
RefPtr<ShutdownPromise>
TheoraDecoder::Shutdown()
{
if (mTheoraDecoderContext) {
th_decode_free(mTheoraDecoderContext);
mTheoraDecoderContext = nullptr;
}
RefPtr<TheoraDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
if (mTheoraDecoderContext) {
th_decode_free(mTheoraDecoderContext);
mTheoraDecoderContext = nullptr;
}
return ShutdownPromise::CreateAndResolve(true, __func__);
});
}
RefPtr<MediaDataDecoder::InitPromise>
@@ -98,16 +100,12 @@ TheoraDecoder::Init()
}
void
RefPtr<MediaDataDecoder::FlushPromise>
TheoraDecoder::Flush()
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
mIsFlushing = true;
nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([this] () {
// nothing to do for now.
return InvokeAsync(mTaskQueue, __func__, []() {
return FlushPromise::CreateAndResolve(true, __func__);
});
SyncRunnable::DispatchToThread(mTaskQueue, r);
mIsFlushing = false;
}
nsresult
@@ -123,8 +121,8 @@ TheoraDecoder::DoDecodeHeader(const unsigned char* aData, size_t aLength)
return r > 0 ? NS_OK : NS_ERROR_FAILURE;
}
MediaResult
TheoraDecoder::DoDecode(MediaRawData* aSample)
RefPtr<MediaDataDecoder::DecodePromise>
TheoraDecoder::ProcessDecode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
@@ -181,52 +179,33 @@ TheoraDecoder::DoDecode(MediaRawData* aSample)
LOG("Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld",
mTheoraInfo.frame_width, mTheoraInfo.frame_height, mInfo.mDisplay.width, mInfo.mDisplay.height,
mInfo.mImage.width, mInfo.mImage.height);
return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_OUT_OF_MEMORY,
RESULT_DETAIL("Insufficient memory")),
__func__);
}
mCallback->Output(v);
return NS_OK;
} else {
LOG("Theora Decode error: %d", ret);
return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("Theora decode error:%d", ret));
return DecodePromise::CreateAndResolve(DecodedData{v}, __func__);
}
LOG("Theora Decode error: %d", ret);
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("Theora decode error:%d", ret)),
__func__);
}
void
TheoraDecoder::ProcessDecode(MediaRawData* aSample)
RefPtr<MediaDataDecoder::DecodePromise>
TheoraDecoder::Decode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
if (mIsFlushing) {
return;
}
MediaResult rv = DoDecode(aSample);
if (NS_FAILED(rv)) {
mCallback->Error(rv);
} else {
mCallback->InputExhausted();
}
return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
&TheoraDecoder::ProcessDecode, aSample);
}
void
TheoraDecoder::Input(MediaRawData* aSample)
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
this, &TheoraDecoder::ProcessDecode, aSample));
}
void
TheoraDecoder::ProcessDrain()
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
mCallback->DrainComplete();
}
void
RefPtr<MediaDataDecoder::DecodePromise>
TheoraDecoder::Drain()
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
mTaskQueue->Dispatch(NewRunnableMethod(this, &TheoraDecoder::ProcessDrain));
return InvokeAsync(mTaskQueue, __func__, [] {
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
});
}
/* static */

View file

@@ -24,10 +24,10 @@ public:
~TheoraDecoder();
RefPtr<InitPromise> Init() override;
void Input(MediaRawData* aSample) override;
void Flush() override;
void Drain() override;
void Shutdown() override;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
RefPtr<DecodePromise> Drain() override;
RefPtr<FlushPromise> Flush() override;
RefPtr<ShutdownPromise> Shutdown() override;
// Return true if mimetype is a Theora codec
static bool IsTheora(const nsACString& aMimeType);
@@ -40,14 +40,10 @@ public:
private:
nsresult DoDecodeHeader(const unsigned char* aData, size_t aLength);
void ProcessDecode(MediaRawData* aSample);
MediaResult DoDecode(MediaRawData* aSample);
void ProcessDrain();
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
RefPtr<ImageContainer> mImageContainer;
RefPtr<TaskQueue> mTaskQueue;
MediaDataDecoderCallback* mCallback;
Atomic<bool> mIsFlushing;
// Theora header & decoder state
th_info mTheoraInfo;

View file

@@ -69,8 +69,6 @@ InitContext(vpx_codec_ctx_t* aCtx,
VPXDecoder::VPXDecoder(const CreateDecoderParams& aParams)
: mImageContainer(aParams.mImageContainer)
, mTaskQueue(aParams.mTaskQueue)
, mCallback(aParams.mCallback)
, mIsFlushing(false)
, mInfo(aParams.VideoConfig())
, mCodec(MimeTypeToCodec(aParams.VideoConfig().mMimeType))
{
@@ -84,11 +82,15 @@ VPXDecoder::~VPXDecoder()
MOZ_COUNT_DTOR(VPXDecoder);
}
void
RefPtr<ShutdownPromise>
VPXDecoder::Shutdown()
{
vpx_codec_destroy(&mVPX);
vpx_codec_destroy(&mVPXAlpha);
RefPtr<VPXDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
vpx_codec_destroy(&mVPX);
vpx_codec_destroy(&mVPXAlpha);
return ShutdownPromise::CreateAndResolve(true, __func__);
});
}
RefPtr<MediaDataDecoder::InitPromise>
@@ -108,22 +110,19 @@ VPXDecoder::Init()
__func__);
}
void
RefPtr<MediaDataDecoder::FlushPromise>
VPXDecoder::Flush()
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
mIsFlushing = true;
nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([this] () {
// nothing to do for now.
return InvokeAsync(mTaskQueue, __func__, []() {
return FlushPromise::CreateAndResolve(true, __func__);
});
SyncRunnable::DispatchToThread(mTaskQueue, r);
mIsFlushing = false;
}
MediaResult
VPXDecoder::DoDecode(MediaRawData* aSample)
RefPtr<MediaDataDecoder::DecodePromise>
VPXDecoder::ProcessDecode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
#if defined(DEBUG)
vpx_codec_stream_info_t si;
PodZero(&si);
@@ -139,15 +138,17 @@ VPXDecoder::DoDecode(MediaRawData* aSample)
if (vpx_codec_err_t r = vpx_codec_decode(&mVPX, aSample->Data(), aSample->Size(), nullptr, 0)) {
LOG("VPX Decode error: %s", vpx_codec_err_to_string(r));
return MediaResult(
NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("VPX error: %s", vpx_codec_err_to_string(r)));
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("VPX error: %s", vpx_codec_err_to_string(r))),
__func__);
}
vpx_codec_iter_t iter = nullptr;
vpx_image_t *img;
vpx_image_t *img_alpha = nullptr;
bool alpha_decoded = false;
DecodedData results;
while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420 ||
@@ -157,10 +158,10 @@ VPXDecoder::DoDecode(MediaRawData* aSample)
"Multiple frames per packet that contains alpha");
if (aSample->AlphaSize() > 0) {
if(!alpha_decoded){
if (!alpha_decoded){
MediaResult rv = DecodeAlpha(&img_alpha, aSample);
if (NS_FAILED(rv)) {
return(rv);
return DecodePromise::CreateAndReject(rv, __func__);
}
alpha_decoded = true;
}
@@ -195,8 +196,10 @@ VPXDecoder::DoDecode(MediaRawData* aSample)
b.mPlanes[2].mWidth = img->d_w;
} else {
LOG("VPX Unknown image format");
return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("VPX Unknown image format"));
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("VPX Unknown image format")),
__func__);
}
RefPtr<VideoData> v;
@@ -233,56 +236,35 @@ VPXDecoder::DoDecode(MediaRawData* aSample)
}
if (!v) {
LOG("Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld",
img->d_w, img->d_h, mInfo.mDisplay.width, mInfo.mDisplay.height,
mInfo.mImage.width, mInfo.mImage.height);
return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
LOG(
"Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld",
img->d_w, img->d_h, mInfo.mDisplay.width, mInfo.mDisplay.height,
mInfo.mImage.width, mInfo.mImage.height);
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
}
mCallback->Output(v);
results.AppendElement(Move(v));
}
return NS_OK;
return DecodePromise::CreateAndResolve(Move(results), __func__);
}
void
VPXDecoder::ProcessDecode(MediaRawData* aSample)
RefPtr<MediaDataDecoder::DecodePromise>
VPXDecoder::Decode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
if (mIsFlushing) {
return;
}
MediaResult rv = DoDecode(aSample);
if (NS_FAILED(rv)) {
mCallback->Error(rv);
} else {
mCallback->InputExhausted();
}
return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
&VPXDecoder::ProcessDecode, aSample);
}
void
VPXDecoder::Input(MediaRawData* aSample)
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
this, &VPXDecoder::ProcessDecode, aSample));
}
void
VPXDecoder::ProcessDrain()
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
mCallback->DrainComplete();
}
void
RefPtr<MediaDataDecoder::DecodePromise>
VPXDecoder::Drain()
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
mTaskQueue->Dispatch(NewRunnableMethod(this, &VPXDecoder::ProcessDrain));
return InvokeAsync(mTaskQueue, __func__, [] {
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
});
}
MediaResult
VPXDecoder::DecodeAlpha(vpx_image_t** aImgAlpha,
MediaRawData* aSample)
VPXDecoder::DecodeAlpha(vpx_image_t** aImgAlpha, const MediaRawData* aSample)
{
vpx_codec_err_t r = vpx_codec_decode(&mVPXAlpha,
aSample->AlphaData(),

View file

@@ -25,16 +25,17 @@ public:
~VPXDecoder();
RefPtr<InitPromise> Init() override;
void Input(MediaRawData* aSample) override;
void Flush() override;
void Drain() override;
void Shutdown() override;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
RefPtr<DecodePromise> Drain() override;
RefPtr<FlushPromise> Flush() override;
RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "libvpx video decoder";
}
enum Codec: uint8_t {
enum Codec: uint8_t
{
VP8 = 1 << 0,
VP9 = 1 << 1
};
@@ -47,16 +48,11 @@ public:
static bool IsVP9(const nsACString& aMimeType);
private:
void ProcessDecode(MediaRawData* aSample);
MediaResult DoDecode(MediaRawData* aSample);
void ProcessDrain();
MediaResult DecodeAlpha(vpx_image_t** aImgAlpha,
MediaRawData* aSample);
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
MediaResult DecodeAlpha(vpx_image_t** aImgAlpha, const MediaRawData* aSample);
const RefPtr<ImageContainer> mImageContainer;
const RefPtr<TaskQueue> mTaskQueue;
MediaDataDecoderCallback* mCallback;
Atomic<bool> mIsFlushing;
// VPx decoder state
vpx_codec_ctx_t mVPX;
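
DecodeAlpha above runs the encoded alpha side data through a second libvpx context (mVPXAlpha) so the result can be attached as a fourth plane. A hypothetical sketch of that step, assuming the stock libvpx C API; DecodeAlphaSketch is an illustrative name.

#include <cstddef>
#include <cstdint>
#include "vpx/vpx_decoder.h"

static vpx_image_t*
DecodeAlphaSketch(vpx_codec_ctx_t* aAlphaCtx, const uint8_t* aAlphaData,
                  size_t aAlphaSize)
{
  if (vpx_codec_decode(aAlphaCtx, aAlphaData,
                       static_cast<unsigned int>(aAlphaSize), nullptr, 0)) {
    return nullptr; // caller maps this to NS_ERROR_DOM_MEDIA_DECODE_ERR
  }
  vpx_codec_iter_t iter = nullptr;
  // Only the Y plane of the returned image carries alpha information.
  return vpx_codec_get_frame(aAlphaCtx, &iter);
}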

View file

@@ -33,10 +33,8 @@ ogg_packet InitVorbisPacket(const unsigned char* aData, size_t aLength,
VorbisDataDecoder::VorbisDataDecoder(const CreateDecoderParams& aParams)
: mInfo(aParams.AudioConfig())
, mTaskQueue(aParams.mTaskQueue)
, mCallback(aParams.mCallback)
, mPacketCount(0)
, mFrames(0)
, mIsFlushing(false)
{
// Zero these member vars to avoid crashes in Vorbis clear functions when
// destructor is called before |Init|.
@@ -54,9 +52,13 @@ VorbisDataDecoder::~VorbisDataDecoder()
vorbis_comment_clear(&mVorbisComment);
}
void
RefPtr<ShutdownPromise>
VorbisDataDecoder::Shutdown()
{
RefPtr<VorbisDataDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self]() {
return ShutdownPromise::CreateAndResolve(true, __func__);
});
}
RefPtr<MediaDataDecoder::InitPromise>
@@ -122,32 +124,15 @@ VorbisDataDecoder::DecodeHeader(const unsigned char* aData, size_t aLength)
return r == 0 ? NS_OK : NS_ERROR_FAILURE;
}
void
VorbisDataDecoder::Input(MediaRawData* aSample)
RefPtr<MediaDataDecoder::DecodePromise>
VorbisDataDecoder::Decode(MediaRawData* aSample)
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
this, &VorbisDataDecoder::ProcessDecode, aSample));
return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
&VorbisDataDecoder::ProcessDecode, aSample);
}
void
RefPtr<MediaDataDecoder::DecodePromise>
VorbisDataDecoder::ProcessDecode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
if (mIsFlushing) {
return;
}
MediaResult rv = DoDecode(aSample);
if (NS_FAILED(rv)) {
mCallback->Error(rv);
} else {
mCallback->InputExhausted();
}
}
MediaResult
VorbisDataDecoder::DoDecode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
@@ -170,27 +155,34 @@ VorbisDataDecoder::DoDecode(MediaRawData* aSample)
int err = vorbis_synthesis(&mVorbisBlock, &pkt);
if (err) {
return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("vorbis_synthesis:%d", err));
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("vorbis_synthesis:%d", err)),
__func__);
}
err = vorbis_synthesis_blockin(&mVorbisDsp, &mVorbisBlock);
if (err) {
return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("vorbis_synthesis_blockin:%d", err));
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("vorbis_synthesis_blockin:%d", err)),
__func__);
}
VorbisPCMValue** pcm = 0;
int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
if (frames == 0) {
return NS_OK;
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
}
DecodedData results;
while (frames > 0) {
uint32_t channels = mVorbisDsp.vi->channels;
uint32_t rate = mVorbisDsp.vi->rate;
AlignedAudioBuffer buffer(frames*channels);
if (!buffer) {
return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
}
for (uint32_t j = 0; j < channels; ++j) {
VorbisPCMValue* channel = pcm[j];
@@ -201,21 +193,26 @@ VorbisDataDecoder::DoDecode(MediaRawData* aSample)
CheckedInt64 duration = FramesToUsecs(frames, rate);
if (!duration.isValid()) {
return MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow converting audio duration"));
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow converting audio duration")),
__func__);
}
CheckedInt64 total_duration = FramesToUsecs(mFrames, rate);
if (!total_duration.isValid()) {
return MediaResult(
NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow converting audio total_duration"));
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow converting audio total_duration")),
__func__);
}
CheckedInt64 time = total_duration + aTstampUsecs;
if (!time.isValid()) {
return MediaResult(
NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow adding total_duration and aTstampUsecs"));
return DecodePromise::CreateAndReject(
MediaResult(
NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Overflow adding total_duration and aTstampUsecs")),
__func__);
};
if (!mAudioConverter) {
@@ -223,9 +220,10 @@ VorbisDataDecoder::DoDecode(MediaRawData* aSample)
rate);
AudioConfig out(channels, rate);
if (!in.IsValid() || !out.IsValid()) {
return MediaResult(
NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("Invalid channel layout:%u", channels));
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("Invalid channel layout:%u", channels)),
__func__);
}
mAudioConverter = MakeUnique<AudioConverter>(in, out);
}
@@ -234,54 +232,43 @@ VorbisDataDecoder::DoDecode(MediaRawData* aSample)
data = mAudioConverter->Process(Move(data));
aTotalFrames += frames;
mCallback->Output(new AudioData(aOffset,
time.value(),
duration.value(),
frames,
data.Forget(),
channels,
rate));
results.AppendElement(new AudioData(aOffset, time.value(), duration.value(),
frames, data.Forget(), channels, rate));
mFrames += frames;
err = vorbis_synthesis_read(&mVorbisDsp, frames);
if (err) {
return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("vorbis_synthesis_read:%d", err));
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("vorbis_synthesis_read:%d", err)),
__func__);
}
frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
}
return NS_OK;
return DecodePromise::CreateAndResolve(Move(results), __func__);
}
void
VorbisDataDecoder::ProcessDrain()
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
mCallback->DrainComplete();
}
void
RefPtr<MediaDataDecoder::DecodePromise>
VorbisDataDecoder::Drain()
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
mTaskQueue->Dispatch(NewRunnableMethod(this, &VorbisDataDecoder::ProcessDrain));
return InvokeAsync(mTaskQueue, __func__, [] {
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
});
}
void
RefPtr<MediaDataDecoder::FlushPromise>
VorbisDataDecoder::Flush()
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
mIsFlushing = true;
nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([this] () {
RefPtr<VorbisDataDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
// Ignore failed results from vorbis_synthesis_restart. They
// aren't fatal and it fails when ResetDecode is called at a
// time when no vorbis data has been read.
vorbis_synthesis_restart(&mVorbisDsp);
mLastFrameTime.reset();
return FlushPromise::CreateAndResolve(true, __func__);
});
SyncRunnable::DispatchToThread(mTaskQueue, r);
mIsFlushing = false;
}
/* static */

View file

@@ -25,10 +25,10 @@ public:
~VorbisDataDecoder();
RefPtr<InitPromise> Init() override;
void Input(MediaRawData* aSample) override;
void Flush() override;
void Drain() override;
void Shutdown() override;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
RefPtr<DecodePromise> Drain() override;
RefPtr<FlushPromise> Flush() override;
RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "vorbis audio decoder";
@@ -40,14 +40,10 @@ public:
private:
nsresult DecodeHeader(const unsigned char* aData, size_t aLength);
void ProcessDecode(MediaRawData* aSample);
MediaResult DoDecode(MediaRawData* aSample);
void ProcessDrain();
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
const AudioInfo& mInfo;
const RefPtr<TaskQueue> mTaskQueue;
MediaDataDecoderCallback* mCallback;
// Vorbis decoder state
vorbis_info mVorbisInfo;
@@ -59,7 +55,6 @@ private:
int64_t mFrames;
Maybe<int64_t> mLastFrameTime;
UniquePtr<AudioConverter> mAudioConverter;
Atomic<bool> mIsFlushing;
};
} // namespace mozilla

View file

@@ -47,13 +47,17 @@ DecodeULawSample(uint8_t aValue)
WaveDataDecoder::WaveDataDecoder(const CreateDecoderParams& aParams)
: mInfo(aParams.AudioConfig())
, mCallback(aParams.mCallback)
, mTaskQueue(aParams.mTaskQueue)
{
}
void
RefPtr<ShutdownPromise>
WaveDataDecoder::Shutdown()
{
RefPtr<WaveDataDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self]() {
return ShutdownPromise::CreateAndResolve(true, __func__);
});
}
RefPtr<MediaDataDecoder::InitPromise>
@@ -62,19 +66,15 @@ WaveDataDecoder::Init()
return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
}
void
WaveDataDecoder::Input(MediaRawData* aSample)
RefPtr<MediaDataDecoder::DecodePromise>
WaveDataDecoder::Decode(MediaRawData* aSample)
{
MediaResult rv = DoDecode(aSample);
if (NS_FAILED(rv)) {
mCallback->Error(rv);
} else {
mCallback->InputExhausted();
}
return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
&WaveDataDecoder::ProcessDecode, aSample);
}
MediaResult
WaveDataDecoder::DoDecode(MediaRawData* aSample)
RefPtr<MediaDataDecoder::DecodePromise>
WaveDataDecoder::ProcessDecode(MediaRawData* aSample)
{
size_t aLength = aSample->Size();
ByteReader aReader(aSample->Data(), aLength);
@@ -85,7 +85,8 @@ WaveDataDecoder::DoDecode(MediaRawData* aSample)
AlignedAudioBuffer buffer(frames * mInfo.mChannels);
if (!buffer) {
return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
}
for (int i = 0; i < frames; ++i) {
for (unsigned int j = 0; j < mInfo.mChannels; ++j) {
@@ -119,26 +120,26 @@ WaveDataDecoder::DoDecode(MediaRawData* aSample)
int64_t duration = frames / mInfo.mRate;
mCallback->Output(new AudioData(aOffset,
aTstampUsecs,
duration,
frames,
Move(buffer),
mInfo.mChannels,
mInfo.mRate));
return NS_OK;
return DecodePromise::CreateAndResolve(
DecodedData{ new AudioData(aOffset, aTstampUsecs, duration, frames,
Move(buffer), mInfo.mChannels, mInfo.mRate) },
__func__);
}
void
RefPtr<MediaDataDecoder::DecodePromise>
WaveDataDecoder::Drain()
{
mCallback->DrainComplete();
return InvokeAsync(mTaskQueue, __func__, [] {
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
});
}
void
RefPtr<MediaDataDecoder::FlushPromise>
WaveDataDecoder::Flush()
{
return InvokeAsync(mTaskQueue, __func__, []() {
return FlushPromise::CreateAndResolve(true, __func__);
});
}
/* static */

View file

@@ -21,20 +21,19 @@ public:
static bool IsWave(const nsACString& aMimeType);
RefPtr<InitPromise> Init() override;
void Input(MediaRawData* aSample) override;
void Flush() override;
void Drain() override;
void Shutdown() override;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
RefPtr<DecodePromise> Drain() override;
RefPtr<FlushPromise> Flush() override;
RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "wave audio decoder";
}
private:
MediaResult DoDecode(MediaRawData* aSample);
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
const AudioInfo& mInfo;
MediaDataDecoderCallback* mCallback;
const RefPtr<TaskQueue> mTaskQueue;
};
} // namespace mozilla

View file

@@ -24,53 +24,68 @@ namespace mozilla {
typedef MozPromiseRequestHolder<CDMProxy::DecryptPromise> DecryptPromiseRequestHolder;
extern already_AddRefed<PlatformDecoderModule> CreateBlankDecoderModule();
class EMEDecryptor : public MediaDataDecoder {
class EMEDecryptor : public MediaDataDecoder
{
public:
EMEDecryptor(MediaDataDecoder* aDecoder,
MediaDataDecoderCallback* aCallback,
CDMProxy* aProxy,
TaskQueue* aDecodeTaskQueue)
EMEDecryptor(MediaDataDecoder* aDecoder, CDMProxy* aProxy,
TaskQueue* aDecodeTaskQueue, TrackInfo::TrackType aType,
MediaEventProducer<TrackInfo::TrackType>* aOnWaitingForKey)
: mDecoder(aDecoder)
, mCallback(aCallback)
, mTaskQueue(aDecodeTaskQueue)
, mProxy(aProxy)
, mSamplesWaitingForKey(new SamplesWaitingForKey(this, this->mCallback,
mTaskQueue, mProxy))
, mSamplesWaitingForKey(
new SamplesWaitingForKey(mProxy, aType, aOnWaitingForKey))
, mIsShutdown(false)
{
}
RefPtr<InitPromise> Init() override {
RefPtr<InitPromise> Init() override
{
MOZ_ASSERT(!mIsShutdown);
return mDecoder->Init();
}
void Input(MediaRawData* aSample) override {
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_RELEASE_ASSERT(mDecrypts.Count() == 0,
"Can only process one sample at a time");
RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
AttemptDecode(aSample);
return p;
}
void AttemptDecode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
if (mIsShutdown) {
NS_WARNING("EME encrypted sample arrived after shutdown");
return;
}
if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
return;
}
nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
mProxy->GetSessionIdsForKeyId(aSample->mCrypto.mKeyId,
writer->mCrypto.mSessionIds);
RefPtr<EMEDecryptor> self = this;
mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)
->Then(mTaskQueue, __func__,
[self, this](MediaRawData* aSample) {
mKeyRequest.Complete();
nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
mProxy->GetSessionIdsForKeyId(aSample->mCrypto.mKeyId,
writer->mCrypto.mSessionIds);
mDecrypts.Put(aSample, new DecryptPromiseRequestHolder());
mProxy->Decrypt(aSample)->Then(
mTaskQueue, __func__, this,
&EMEDecryptor::Decrypted,
&EMEDecryptor::Decrypted)
->Track(*mDecrypts.Get(aSample));
return;
mDecrypts.Put(aSample, new DecryptPromiseRequestHolder());
mProxy->Decrypt(aSample)
->Then(mTaskQueue, __func__, this,
&EMEDecryptor::Decrypted,
&EMEDecryptor::Decrypted)
->Track(*mDecrypts.Get(aSample));
},
[self, this]() { mKeyRequest.Complete(); })
->Track(mKeyRequest);
}
void Decrypted(const DecryptResult& aDecrypted) {
void Decrypted(const DecryptResult& aDecrypted)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(aDecrypted.mSample);
@@ -91,117 +106,180 @@ public:
if (aDecrypted.mStatus == NoKeyErr) {
// Key became unusable after we sent the sample to CDM to decrypt.
// Call Input() again, so that the sample is enqueued for decryption
// Call Decode() again, so that the sample is enqueued for decryption
// if the key becomes usable again.
Input(aDecrypted.mSample);
AttemptDecode(aDecrypted.mSample);
} else if (aDecrypted.mStatus != Ok) {
if (mCallback) {
mCallback->Error(MediaResult(
mDecodePromise.RejectIfExists(
MediaResult(
NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("decrypted.mStatus=%u", uint32_t(aDecrypted.mStatus))));
}
RESULT_DETAIL("decrypted.mStatus=%u", uint32_t(aDecrypted.mStatus))),
__func__);
} else {
MOZ_ASSERT(!mIsShutdown);
// The sample is no longer encrypted, so clear its crypto metadata.
UniquePtr<MediaRawDataWriter> writer(aDecrypted.mSample->CreateWriter());
writer->mCrypto = CryptoSample();
mDecoder->Input(aDecrypted.mSample);
RefPtr<EMEDecryptor> self = this;
mDecoder->Decode(aDecrypted.mSample)
->Then(mTaskQueue, __func__,
[self, this](const DecodedData& aResults) {
mDecodeRequest.Complete();
mDecodePromise.ResolveIfExists(aResults, __func__);
},
[self, this](const MediaResult& aError) {
mDecodeRequest.Complete();
mDecodePromise.RejectIfExists(aError, __func__);
})
->Track(mDecodeRequest);
}
}
void Flush() override {
RefPtr<FlushPromise> Flush() override
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(!mIsShutdown);
mKeyRequest.DisconnectIfExists();
mDecodeRequest.DisconnectIfExists();
mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
for (auto iter = mDecrypts.Iter(); !iter.Done(); iter.Next()) {
nsAutoPtr<DecryptPromiseRequestHolder>& holder = iter.Data();
holder->DisconnectIfExists();
iter.Remove();
}
mDecoder->Flush();
mSamplesWaitingForKey->Flush();
RefPtr<EMEDecryptor> self = this;
return mDecoder->Flush()->Then(mTaskQueue, __func__,
[self, this]() {
mSamplesWaitingForKey->Flush();
},
[self, this]() {
mSamplesWaitingForKey->Flush();
});
}
void Drain() override {
RefPtr<DecodePromise> Drain() override
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(!mIsShutdown);
MOZ_ASSERT(mDecodePromise.IsEmpty() && !mDecodeRequest.Exists(),
"Must wait for decoding to complete");
for (auto iter = mDecrypts.Iter(); !iter.Done(); iter.Next()) {
nsAutoPtr<DecryptPromiseRequestHolder>& holder = iter.Data();
holder->DisconnectIfExists();
iter.Remove();
}
mDecoder->Drain();
return mDecoder->Drain();
}
void Shutdown() override {
RefPtr<ShutdownPromise> Shutdown() override
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_ASSERT(!mIsShutdown);
mIsShutdown = true;
mDecoder->Shutdown();
mSamplesWaitingForKey->BreakCycles();
mSamplesWaitingForKey = nullptr;
mDecoder = nullptr;
RefPtr<MediaDataDecoder> decoder = mDecoder.forget();
mProxy = nullptr;
mCallback = nullptr;
return decoder->Shutdown();
}
const char* GetDescriptionName() const override {
const char* GetDescriptionName() const override
{
return mDecoder->GetDescriptionName();
}
private:
RefPtr<MediaDataDecoder> mDecoder;
MediaDataDecoderCallback* mCallback;
RefPtr<TaskQueue> mTaskQueue;
RefPtr<CDMProxy> mProxy;
nsClassHashtable<nsRefPtrHashKey<MediaRawData>, DecryptPromiseRequestHolder> mDecrypts;
RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
MozPromiseRequestHolder<SamplesWaitingForKey::WaitForKeyPromise> mKeyRequest;
MozPromiseHolder<DecodePromise> mDecodePromise;
MozPromiseHolder<DecodePromise> mDrainPromise;
MozPromiseHolder<FlushPromise> mFlushPromise;
MozPromiseRequestHolder<DecodePromise> mDecodeRequest;
bool mIsShutdown;
};
class EMEMediaDataDecoderProxy : public MediaDataDecoderProxy {
class EMEMediaDataDecoderProxy : public MediaDataDecoderProxy
{
public:
EMEMediaDataDecoderProxy(already_AddRefed<AbstractThread> aProxyThread,
MediaDataDecoderCallback* aCallback,
CDMProxy* aProxy,
TaskQueue* aTaskQueue)
: MediaDataDecoderProxy(Move(aProxyThread), aCallback)
, mSamplesWaitingForKey(new SamplesWaitingForKey(this, aCallback,
aTaskQueue, aProxy))
, mProxy(aProxy)
EMEMediaDataDecoderProxy(
already_AddRefed<AbstractThread> aProxyThread, CDMProxy* aProxy,
TrackInfo::TrackType aType,
MediaEventProducer<TrackInfo::TrackType>* aOnWaitingForKey)
: MediaDataDecoderProxy(Move(aProxyThread))
, mTaskQueue(AbstractThread::GetCurrent()->AsTaskQueue())
, mSamplesWaitingForKey(
new SamplesWaitingForKey(aProxy, aType, aOnWaitingForKey))
, mProxy(aProxy)
{
}
void Input(MediaRawData* aSample) override;
void Shutdown() override;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
RefPtr<FlushPromise> Flush() override;
RefPtr<ShutdownPromise> Shutdown() override;
private:
RefPtr<TaskQueue> mTaskQueue;
RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
MozPromiseRequestHolder<SamplesWaitingForKey::WaitForKeyPromise> mKeyRequest;
MozPromiseHolder<DecodePromise> mDecodePromise;
MozPromiseRequestHolder<DecodePromise> mDecodeRequest;
RefPtr<CDMProxy> mProxy;
};
void
EMEMediaDataDecoderProxy::Input(MediaRawData* aSample)
RefPtr<MediaDataDecoder::DecodePromise>
EMEMediaDataDecoderProxy::Decode(MediaRawData* aSample)
{
if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
return;
}
RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
mProxy->GetSessionIdsForKeyId(aSample->mCrypto.mKeyId,
writer->mCrypto.mSessionIds);
RefPtr<EMEMediaDataDecoderProxy> self = this;
mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)
->Then(mTaskQueue, __func__,
[self, this](MediaRawData* aSample) {
mKeyRequest.Complete();
MediaDataDecoderProxy::Input(aSample);
nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
mProxy->GetSessionIdsForKeyId(aSample->mCrypto.mKeyId,
writer->mCrypto.mSessionIds);
MediaDataDecoderProxy::Decode(aSample)
->Then(mTaskQueue, __func__,
[self, this](const DecodedData& aResults) {
mDecodeRequest.Complete();
mDecodePromise.Resolve(aResults, __func__);
},
[self, this](const MediaResult& aError) {
mDecodeRequest.Complete();
mDecodePromise.Reject(aError, __func__);
})
->Track(mDecodeRequest);
},
[self, this]() {
mKeyRequest.Complete();
MOZ_CRASH("Should never get here");
})
->Track(mKeyRequest);
return p;
}
void
RefPtr<MediaDataDecoder::FlushPromise>
EMEMediaDataDecoderProxy::Flush()
{
mKeyRequest.DisconnectIfExists();
mDecodeRequest.DisconnectIfExists();
mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
return MediaDataDecoderProxy::Flush();
}
RefPtr<ShutdownPromise>
EMEMediaDataDecoderProxy::Shutdown()
{
MediaDataDecoderProxy::Shutdown();
mSamplesWaitingForKey->BreakCycles();
mSamplesWaitingForKey = nullptr;
mProxy = nullptr;
return MediaDataDecoderProxy::Shutdown();
}
EMEDecoderModule::EMEDecoderModule(CDMProxy* aProxy, PDMFactory* aPDM)
@@ -215,7 +293,7 @@ EMEDecoderModule::~EMEDecoderModule()
}
static already_AddRefed<MediaDataDecoderProxy>
CreateDecoderWrapper(MediaDataDecoderCallback* aCallback, CDMProxy* aProxy, TaskQueue* aTaskQueue)
CreateDecoderWrapper(CDMProxy* aProxy, const CreateDecoderParams& aParams)
{
RefPtr<gmp::GeckoMediaPluginService> s(gmp::GeckoMediaPluginService::GetGeckoMediaPluginService());
if (!s) {
@@ -225,8 +303,8 @@ CreateDecoderWrapper(MediaDataDecoderCallback* aCallback, CDMProxy* aProxy, Task
if (!thread) {
return nullptr;
}
RefPtr<MediaDataDecoderProxy> decoder(
new EMEMediaDataDecoderProxy(thread.forget(), aCallback, aProxy, aTaskQueue));
RefPtr<MediaDataDecoderProxy> decoder(new EMEMediaDataDecoderProxy(
thread.forget(), aProxy, aParams.mType, aParams.mOnWaitingForKeyEvent));
return decoder.forget();
}
@@ -244,8 +322,8 @@ EMEDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
if (SupportsMimeType(aParams.mConfig.mMimeType, nullptr)) {
// GMP decodes. Assume that means it can decrypt too.
RefPtr<MediaDataDecoderProxy> wrapper =
CreateDecoderWrapper(aParams.mCallback, mProxy, aParams.mTaskQueue);
auto params = GMPVideoDecoderParams(aParams).WithCallback(wrapper);
CreateDecoderWrapper(mProxy, aParams);
auto params = GMPVideoDecoderParams(aParams);
wrapper->SetProxyTarget(new EMEVideoDecoder(mProxy, params));
return wrapper.forget();
}
@@ -256,10 +334,9 @@ EMEDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
return nullptr;
}
RefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(decoder,
aParams.mCallback,
mProxy,
AbstractThread::GetCurrent()->AsTaskQueue()));
RefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(
decoder, mProxy, AbstractThread::GetCurrent()->AsTaskQueue(),
aParams.mType, aParams.mOnWaitingForKeyEvent));
return emeDecoder.forget();
}
@@ -283,10 +360,9 @@ EMEDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
return nullptr;
}
RefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(decoder,
aParams.mCallback,
mProxy,
AbstractThread::GetCurrent()->AsTaskQueue()));
RefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(
decoder, mProxy, AbstractThread::GetCurrent()->AsTaskQueue(),
aParams.mType, aParams.mOnWaitingForKeyEvent));
return emeDecoder.forget();
}
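
The decryptor's control flow above hinges on MozPromiseRequestHolder: Track() records an in-flight request so Flush() can DisconnectIfExists(), and each handler must call Complete() before touching other state. A compressed sketch of the pattern, assuming a refcounted owner; KeyWaitSketch is an illustrative class, not part of the patch.

class KeyWaitSketch
{
public:
  void Attempt(MediaRawData* aSample)
  {
    // Assumes this class declares NS_INLINE_DECL_THREADSAFE_REFCOUNTING.
    RefPtr<KeyWaitSketch> self = this;
    mWaiter->WaitIfKeyNotUsable(aSample)
      ->Then(mQueue, __func__,
             [self](MediaRawData* aReady) {
               self->mKeyRequest.Complete();
               // ...hand aReady to the CDM, then to the wrapped decoder...
             },
             [self]() { self->mKeyRequest.Complete(); })
      ->Track(mKeyRequest);
  }

  // Flush-time cancellation: drop the in-flight wait without running the
  // handlers above.
  void Cancel() { mKeyRequest.DisconnectIfExists(); }

private:
  RefPtr<TaskQueue> mQueue;
  RefPtr<SamplesWaitingForKey> mWaiter;
  MozPromiseRequestHolder<SamplesWaitingForKey::WaitForKeyPromise> mKeyRequest;
};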

View file

@@ -15,9 +15,8 @@ namespace mozilla {
class CDMProxy;
class EMEDecoderModule : public PlatformDecoderModule {
private:
class EMEDecoderModule : public PlatformDecoderModule
{
public:
EMEDecoderModule(CDMProxy* aProxy, PDMFactory* aPDM);
@@ -43,8 +42,6 @@ private:
RefPtr<CDMProxy> mProxy;
// Will be null if CDM has decoding capability.
RefPtr<PDMFactory> mPDM;
// We run the PDM on its own task queue.
RefPtr<TaskQueue> mTaskQueue;
};
} // namespace mozilla

View file

@@ -4,36 +4,23 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/CDMProxy.h"
#include "EMEVideoDecoder.h"
#include "GMPVideoEncodedFrameImpl.h"
#include "mozilla/CDMProxy.h"
#include "MediaData.h"
#include "MP4Decoder.h"
#include "PlatformDecoderModule.h"
#include "VPXDecoder.h"
namespace mozilla {
void
EMEVideoCallbackAdapter::Error(GMPErr aErr)
{
if (aErr == GMPNoKeyErr) {
// The GMP failed to decrypt a frame due to not having a key. This can
// happen if a key expires or a session is closed during playback.
NS_WARNING("GMP failed to decrypt due to lack of key");
return;
}
VideoCallbackAdapter::Error(aErr);
}
EMEVideoDecoder::EMEVideoDecoder(CDMProxy* aProxy,
const GMPVideoDecoderParams& aParams)
: GMPVideoDecoder(GMPVideoDecoderParams(aParams).WithAdapter(
new EMEVideoCallbackAdapter(aParams.mCallback,
VideoInfo(aParams.mConfig.mDisplay),
aParams.mImageContainer)))
: GMPVideoDecoder(GMPVideoDecoderParams(aParams))
, mProxy(aProxy)
, mDecryptorId(aProxy->GetDecryptorId())
{}
{
}
void
EMEVideoDecoder::InitTags(nsTArray<nsCString>& aTags)

View file

@@ -8,24 +8,13 @@
#define EMEVideoDecoder_h_
#include "GMPVideoDecoder.h"
#include "PlatformDecoderModule.h"
namespace mozilla {
class CDMProxy;
class MediaRawData;
class TaskQueue;
class EMEVideoCallbackAdapter : public VideoCallbackAdapter {
public:
EMEVideoCallbackAdapter(MediaDataDecoderCallbackProxy* aCallback,
VideoInfo aVideoInfo,
layers::ImageContainer* aImageContainer)
: VideoCallbackAdapter(aCallback, aVideoInfo, aImageContainer)
{}
void Error(GMPErr aErr) override;
};
class EMEVideoDecoder : public GMPVideoDecoder {
public:
EMEVideoDecoder(CDMProxy* aProxy, const GMPVideoDecoderParams& aParams);

View file

@@ -4,47 +4,53 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "SamplesWaitingForKey.h"
#include "mozilla/CDMProxy.h"
#include "mozilla/CDMCaps.h"
#include "mozilla/TaskQueue.h"
#include "MediaData.h"
#include "MediaEventSource.h"
#include "SamplesWaitingForKey.h"
namespace mozilla {
SamplesWaitingForKey::SamplesWaitingForKey(MediaDataDecoder* aDecoder,
MediaDataDecoderCallback* aCallback,
TaskQueue* aTaskQueue,
CDMProxy* aProxy)
SamplesWaitingForKey::SamplesWaitingForKey(
CDMProxy* aProxy, TrackInfo::TrackType aType,
MediaEventProducer<TrackInfo::TrackType>* aOnWaitingForKey)
: mMutex("SamplesWaitingForKey")
, mDecoder(aDecoder)
, mDecoderCallback(aCallback)
, mTaskQueue(aTaskQueue)
, mProxy(aProxy)
, mType(aType)
, mOnWaitingForKeyEvent(aOnWaitingForKey)
{
}
SamplesWaitingForKey::~SamplesWaitingForKey()
{
Flush();
}
bool
RefPtr<SamplesWaitingForKey::WaitForKeyPromise>
SamplesWaitingForKey::WaitIfKeyNotUsable(MediaRawData* aSample)
{
if (!aSample || !aSample->mCrypto.mValid || !mProxy) {
return false;
return WaitForKeyPromise::CreateAndResolve(aSample, __func__);
}
CDMCaps::AutoLock caps(mProxy->Capabilites());
const auto& keyid = aSample->mCrypto.mKeyId;
if (!caps.IsKeyUsable(keyid)) {
{
MutexAutoLock lock(mMutex);
mSamples.AppendElement(aSample);
}
mDecoderCallback->WaitingForKey();
caps.NotifyWhenKeyIdUsable(aSample->mCrypto.mKeyId, this);
return true;
if (caps.IsKeyUsable(keyid)) {
return WaitForKeyPromise::CreateAndResolve(aSample, __func__);
}
return false;
SampleEntry entry;
entry.mSample = aSample;
RefPtr<WaitForKeyPromise> p = entry.mPromise.Ensure(__func__);
{
MutexAutoLock lock(mMutex);
mSamples.AppendElement(Move(entry));
}
if (mOnWaitingForKeyEvent) {
mOnWaitingForKeyEvent->Notify(mType);
}
caps.NotifyWhenKeyIdUsable(aSample->mCrypto.mKeyId, this);
return p;
}
void
@@ -53,13 +59,10 @@ SamplesWaitingForKey::NotifyUsable(const CencKeyId& aKeyId)
MutexAutoLock lock(mMutex);
size_t i = 0;
while (i < mSamples.Length()) {
if (aKeyId == mSamples[i]->mCrypto.mKeyId) {
RefPtr<nsIRunnable> task;
task = NewRunnableMethod<RefPtr<MediaRawData>>(mDecoder,
&MediaDataDecoder::Input,
RefPtr<MediaRawData>(mSamples[i]));
auto& entry = mSamples[i];
if (aKeyId == entry.mSample->mCrypto.mKeyId) {
entry.mPromise.Resolve(entry.mSample, __func__);
mSamples.RemoveElementAt(i);
mTaskQueue->Dispatch(task.forget());
} else {
i++;
}
@@ -70,16 +73,9 @@ void
SamplesWaitingForKey::Flush()
{
MutexAutoLock lock(mMutex);
mSamples.Clear();
}
void
SamplesWaitingForKey::BreakCycles()
{
MutexAutoLock lock(mMutex);
mDecoder = nullptr;
mTaskQueue = nullptr;
mProxy = nullptr;
for (auto& sample : mSamples) {
sample.mPromise.Reject(true, __func__);
}
mSamples.Clear();
}
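
Roughly the intended consumption pattern for the new promise, sketched with illustrative names (EMEDecryptor carries the real version of this chain):

  mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)->Then(
    mTaskQueue, __func__,
    [](RefPtr<MediaRawData> aSample) {
      // The key is usable now (or the sample was never encrypted);
      // hand the sample to the CDM for decryption.
    },
    [](bool) {
      // Rejected by Flush(); the pending sample has been dropped.
    });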

View file

@@ -7,50 +7,54 @@
#ifndef SamplesWaitingForKey_h_
#define SamplesWaitingForKey_h_
#include "mozilla/TaskQueue.h"
#include "PlatformDecoderModule.h"
#include "mozilla/MozPromise.h"
#include "mozilla/Mutex.h"
#include "mozilla/RefPtr.h"
#include "MediaInfo.h"
namespace mozilla {
typedef nsTArray<uint8_t> CencKeyId;
class CDMProxy;
template <typename... Es> class MediaEventProducer;
class MediaRawData;
// Encapsulates the task of waiting for the CDMProxy to have the necessary
// keys to decrypt a given sample.
class SamplesWaitingForKey {
class SamplesWaitingForKey
{
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SamplesWaitingForKey)
explicit SamplesWaitingForKey(MediaDataDecoder* aDecoder,
MediaDataDecoderCallback* aCallback,
TaskQueue* aTaskQueue,
CDMProxy* aProxy);
typedef MozPromise<RefPtr<MediaRawData>, bool, /* IsExclusive = */ true>
WaitForKeyPromise;
// Returns true if we need to wait for a key to become usable.
// Will callback MediaDataDecoder::Input(aSample) on mDecoder once the
// sample is ready to be decrypted. The order of input samples is
// preserved.
bool WaitIfKeyNotUsable(MediaRawData* aSample);
SamplesWaitingForKey(CDMProxy* aProxy, TrackInfo::TrackType aType,
MediaEventProducer<TrackInfo::TrackType>* aOnWaitingForKey);
// Returns a promise that will be resolved if or when a key for decoding the
// sample becomes usable.
RefPtr<WaitForKeyPromise> WaitIfKeyNotUsable(MediaRawData* aSample);
void NotifyUsable(const CencKeyId& aKeyId);
void Flush();
void BreakCycles();
protected:
~SamplesWaitingForKey();
private:
Mutex mMutex;
RefPtr<MediaDataDecoder> mDecoder;
MediaDataDecoderCallback* mDecoderCallback;
RefPtr<TaskQueue> mTaskQueue;
RefPtr<CDMProxy> mProxy;
nsTArray<RefPtr<MediaRawData>> mSamples;
struct SampleEntry
{
RefPtr<MediaRawData> mSample;
MozPromiseHolder<WaitForKeyPromise> mPromise;
};
nsTArray<SampleEntry> mSamples;
const TrackInfo::TrackType mType;
MediaEventProducer<TrackInfo::TrackType>* const mOnWaitingForKeyEvent;
};
} // namespace mozilla
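
The old MediaDataDecoderCallback::WaitingForKey() notification is replaced by the mOnWaitingForKeyEvent producer above. A sketch of the listening side, assuming the owner exposes the producer as a MediaEventSource (`taskQueue` and the lambda body are illustrative):

  MediaEventListener listener =
    onWaitingForKeyEvent.Connect(taskQueue, [](TrackInfo::TrackType aType) {
      // Playback is stalled until a usable key arrives for this track.
    });
  // ... on teardown:
  listener.Disconnect();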

View file

@@ -32,7 +32,7 @@ GMPDecoderModule::~GMPDecoderModule()
}
static already_AddRefed<MediaDataDecoderProxy>
CreateDecoderWrapper(MediaDataDecoderCallback* aCallback)
CreateDecoderWrapper()
{
RefPtr<gmp::GeckoMediaPluginService> s(gmp::GeckoMediaPluginService::GetGeckoMediaPluginService());
if (!s) {
@@ -42,7 +42,7 @@ CreateDecoderWrapper(MediaDataDecoderCallback* aCallback)
if (!thread) {
return nullptr;
}
RefPtr<MediaDataDecoderProxy> decoder(new MediaDataDecoderProxy(thread.forget(), aCallback));
RefPtr<MediaDataDecoderProxy> decoder(new MediaDataDecoderProxy(thread.forget()));
return decoder.forget();
}
@@ -55,8 +55,8 @@ GMPDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
return nullptr;
}
RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(aParams.mCallback);
auto params = GMPVideoDecoderParams(aParams).WithCallback(wrapper);
RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper();
auto params = GMPVideoDecoderParams(aParams);
wrapper->SetProxyTarget(new GMPVideoDecoder(params));
return wrapper.forget();
}
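
The two-step wiring above, condensed to its shape (both calls appear in the hunks; this only restates them): the proxy is created bound to the GMP thread first, and only then pointed at the decoder it forwards to.

  RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper();
  wrapper->SetProxyTarget(new GMPVideoDecoder(GMPVideoDecoderParams(aParams)));
  return wrapper.forget();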

View file

@@ -27,8 +27,17 @@ static bool IsOnGMPThread()
}
#endif
GMPVideoDecoderParams::GMPVideoDecoderParams(const CreateDecoderParams& aParams)
: mConfig(aParams.VideoConfig())
, mTaskQueue(aParams.mTaskQueue)
, mImageContainer(aParams.mImageContainer)
, mLayersBackend(aParams.GetLayersBackend())
, mCrashHelper(aParams.mCrashHelper)
{
}
void
VideoCallbackAdapter::Decoded(GMPVideoi420Frame* aDecodedFrame)
GMPVideoDecoder::Decoded(GMPVideoi420Frame* aDecodedFrame)
{
GMPUniquePtr<GMPVideoi420Frame> decodedFrame(aDecodedFrame);
@@ -51,7 +60,7 @@ VideoCallbackAdapter::Decoded(GMPVideoi420Frame* aDecodedFrame)
gfx::IntRect pictureRegion(0, 0, decodedFrame->Width(), decodedFrame->Height());
RefPtr<VideoData> v =
VideoData::CreateAndCopyData(mVideoInfo,
VideoData::CreateAndCopyData(mConfig,
mImageContainer,
mLastStreamOffset,
decodedFrame->Timestamp(),
@@ -60,110 +69,80 @@ VideoCallbackAdapter::Decoded(GMPVideoi420Frame* aDecodedFrame)
false,
-1,
pictureRegion);
RefPtr<GMPVideoDecoder> self = this;
if (v) {
mCallback->Output(v);
mDecodedData.AppendElement(Move(v));
} else {
mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
mDecodedData.Clear();
mDecodePromise.RejectIfExists(
MediaResult(NS_ERROR_OUT_OF_MEMORY,
RESULT_DETAIL("CallBack::CreateAndCopyData")),
__func__);
}
}
void
VideoCallbackAdapter::ReceivedDecodedReferenceFrame(const uint64_t aPictureId)
GMPVideoDecoder::ReceivedDecodedReferenceFrame(const uint64_t aPictureId)
{
MOZ_ASSERT(IsOnGMPThread());
}
void
VideoCallbackAdapter::ReceivedDecodedFrame(const uint64_t aPictureId)
GMPVideoDecoder::ReceivedDecodedFrame(const uint64_t aPictureId)
{
MOZ_ASSERT(IsOnGMPThread());
}
void
VideoCallbackAdapter::InputDataExhausted()
GMPVideoDecoder::InputDataExhausted()
{
MOZ_ASSERT(IsOnGMPThread());
mCallback->InputExhausted();
mDecodePromise.ResolveIfExists(mDecodedData, __func__);
mDecodedData.Clear();
}
void
VideoCallbackAdapter::DrainComplete()
GMPVideoDecoder::DrainComplete()
{
MOZ_ASSERT(IsOnGMPThread());
mCallback->DrainComplete();
mDrainPromise.ResolveIfExists(mDecodedData, __func__);
mDecodedData.Clear();
}
void
VideoCallbackAdapter::ResetComplete()
GMPVideoDecoder::ResetComplete()
{
MOZ_ASSERT(IsOnGMPThread());
mCallback->FlushComplete();
mFlushPromise.ResolveIfExists(true, __func__);
}
void
VideoCallbackAdapter::Error(GMPErr aErr)
GMPVideoDecoder::Error(GMPErr aErr)
{
MOZ_ASSERT(IsOnGMPThread());
mCallback->Error(MediaResult(aErr == GMPDecodeErr
? NS_ERROR_DOM_MEDIA_DECODE_ERR
: NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("GMPErr:%x", aErr)));
auto error = MediaResult(aErr == GMPDecodeErr ? NS_ERROR_DOM_MEDIA_DECODE_ERR
: NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("GMPErr:%x", aErr));
mDecodePromise.RejectIfExists(error, __func__);
mDrainPromise.RejectIfExists(error, __func__);
mFlushPromise.RejectIfExists(error, __func__);
}
void
VideoCallbackAdapter::Terminated()
GMPVideoDecoder::Terminated()
{
// Note that this *may* be called from the proxy thread also.
mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("Video GMP decoder terminated.")));
}
GMPVideoDecoderParams::GMPVideoDecoderParams(const CreateDecoderParams& aParams)
: mConfig(aParams.VideoConfig())
, mTaskQueue(aParams.mTaskQueue)
, mCallback(nullptr)
, mAdapter(nullptr)
, mImageContainer(aParams.mImageContainer)
, mLayersBackend(aParams.GetLayersBackend())
, mCrashHelper(aParams.mCrashHelper)
{}
GMPVideoDecoderParams&
GMPVideoDecoderParams::WithCallback(MediaDataDecoderProxy* aWrapper)
{
MOZ_ASSERT(aWrapper);
MOZ_ASSERT(!mCallback); // Should only be called once per instance.
mCallback = aWrapper->Callback();
mAdapter = nullptr;
return *this;
}
GMPVideoDecoderParams&
GMPVideoDecoderParams::WithAdapter(VideoCallbackAdapter* aAdapter)
{
MOZ_ASSERT(aAdapter);
MOZ_ASSERT(!mAdapter); // Should only be called once per instance.
mCallback = aAdapter->Callback();
mAdapter = aAdapter;
return *this;
MOZ_ASSERT(IsOnGMPThread());
Error(GMPErr::GMPAbortedErr);
}
GMPVideoDecoder::GMPVideoDecoder(const GMPVideoDecoderParams& aParams)
: mConfig(aParams.mConfig)
, mCallback(aParams.mCallback)
, mGMP(nullptr)
, mHost(nullptr)
, mAdapter(aParams.mAdapter)
, mConvertNALUnitLengths(false)
, mCrashHelper(aParams.mCrashHelper)
, mImageContainer(aParams.mImageContainer)
{
MOZ_ASSERT(!mAdapter || mCallback == mAdapter->Callback());
if (!mAdapter) {
mAdapter = new VideoCallbackAdapter(mCallback,
VideoInfo(mConfig.mDisplay.width,
mConfig.mDisplay.height),
aParams.mImageContainer);
}
}
void
@@ -190,16 +169,12 @@ GMPVideoDecoder::CreateFrame(MediaRawData* aSample)
GMPVideoFrame* ftmp = nullptr;
GMPErr err = mHost->CreateFrame(kGMPEncodedVideoFrame, &ftmp);
if (GMP_FAILED(err)) {
mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
RESULT_DETAIL("Host::CreateFrame:%x", err)));
return nullptr;
}
GMPUniquePtr<GMPVideoEncodedFrame> frame(static_cast<GMPVideoEncodedFrame*>(ftmp));
err = frame->CreateEmptyFrame(aSample->Size());
if (GMP_FAILED(err)) {
mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
RESULT_DETAIL("GMPVideoEncodedFrame::CreateEmptyFrame:%x", err)));
return nullptr;
}
@@ -278,7 +253,7 @@ GMPVideoDecoder::GMPInitDone(GMPVideoDecoderProxy* aGMP, GMPVideoHost* aHost)
nsresult rv = aGMP->InitDecode(codec,
codecSpecific,
mAdapter,
this,
PR_GetNumberOfProcessors());
if (NS_FAILED(rv)) {
aGMP->Close();
@@ -326,66 +301,85 @@ GMPVideoDecoder::Init()
return promise;
}
void
GMPVideoDecoder::Input(MediaRawData* aSample)
RefPtr<MediaDataDecoder::DecodePromise>
GMPVideoDecoder::Decode(MediaRawData* aSample)
{
MOZ_ASSERT(IsOnGMPThread());
RefPtr<MediaRawData> sample(aSample);
if (!mGMP) {
mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("mGMP not initialized")));
return;
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("mGMP not initialized")),
__func__);
}
mAdapter->SetLastStreamOffset(sample->mOffset);
mLastStreamOffset = sample->mOffset;
GMPUniquePtr<GMPVideoEncodedFrame> frame = CreateFrame(sample);
if (!frame) {
mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
RESULT_DETAIL("CreateFrame returned null")));
return;
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_OUT_OF_MEMORY,
RESULT_DETAIL("CreateFrame returned null")),
__func__);
}
RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
nsTArray<uint8_t> info; // No codec specific per-frame info to pass.
nsresult rv = mGMP->Decode(Move(frame), false, info, 0);
if (NS_FAILED(rv)) {
mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("mGMP->Decode:%x", rv)));
mDecodePromise.Reject(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("mGMP->Decode:%x", rv)),
__func__);
}
return p;
}
void
RefPtr<MediaDataDecoder::FlushPromise>
GMPVideoDecoder::Flush()
{
MOZ_ASSERT(IsOnGMPThread());
mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
mDrainPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
RefPtr<FlushPromise> p = mFlushPromise.Ensure(__func__);
if (!mGMP || NS_FAILED(mGMP->Reset())) {
// Abort the flush.
mCallback->FlushComplete();
mFlushPromise.Resolve(true, __func__);
}
return p;
}
void
RefPtr<MediaDataDecoder::DecodePromise>
GMPVideoDecoder::Drain()
{
MOZ_ASSERT(IsOnGMPThread());
MOZ_ASSERT(mDecodePromise.IsEmpty(), "Must wait for decoding to complete");
RefPtr<DecodePromise> p = mDrainPromise.Ensure(__func__);
if (!mGMP || NS_FAILED(mGMP->Drain())) {
mCallback->DrainComplete();
mDrainPromise.Resolve(DecodedData(), __func__);
}
return p;
}
void
RefPtr<ShutdownPromise>
GMPVideoDecoder::Shutdown()
{
mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
mFlushPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
// Note that this *may* be called from the proxy thread also.
// TODO: If that's the case, then this code is racy.
if (!mGMP) {
return;
return ShutdownPromise::CreateAndResolve(true, __func__);
}
// Note this unblocks flush and drain operations waiting for callbacks.
mGMP->Close();
mGMP = nullptr;
return ShutdownPromise::CreateAndResolve(true, __func__);
}
} // namespace mozilla
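
The GMP callbacks above settle promises instead of invoking mCallback: Decoded() accumulates frames into mDecodedData, InputDataExhausted() and DrainComplete() resolve the pending promise with the batch, and Error() rejects everything. The *IfExists variants matter because the GMP may report an error after a batch has already been resolved; settling an empty holder is a no-op. The error path, as a sketch (the helper name is illustrative):

  void RejectAll(const MediaResult& aError)  // illustrative helper
  {
    mDecodePromise.RejectIfExists(aError, __func__);
    mDrainPromise.RejectIfExists(aError, __func__);
    mFlushPromise.RejectIfExists(aError, __func__);
  }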

View file

@@ -16,20 +16,35 @@
namespace mozilla {
class VideoCallbackAdapter : public GMPVideoDecoderCallbackProxy {
public:
VideoCallbackAdapter(MediaDataDecoderCallbackProxy* aCallback,
VideoInfo aVideoInfo,
layers::ImageContainer* aImageContainer)
: mCallback(aCallback)
, mLastStreamOffset(0)
, mVideoInfo(aVideoInfo)
, mImageContainer(aImageContainer)
{}
struct GMPVideoDecoderParams
{
explicit GMPVideoDecoderParams(const CreateDecoderParams& aParams);
MediaDataDecoderCallbackProxy* Callback() const { return mCallback; }
const VideoInfo& mConfig;
TaskQueue* mTaskQueue;
layers::ImageContainer* mImageContainer;
layers::LayersBackend mLayersBackend;
RefPtr<GMPCrashHelper> mCrashHelper;
};
class GMPVideoDecoder : public MediaDataDecoder,
public GMPVideoDecoderCallbackProxy
{
public:
explicit GMPVideoDecoder(const GMPVideoDecoderParams& aParams);
RefPtr<InitPromise> Init() override;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
RefPtr<DecodePromise> Drain() override;
RefPtr<FlushPromise> Flush() override;
RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "GMP video decoder";
}
// GMPVideoDecoderCallbackProxy
// All those methods are called on the GMP thread.
void Decoded(GMPVideoi420Frame* aDecodedFrame) override;
void ReceivedDecodedReferenceFrame(const uint64_t aPictureId) override;
void ReceivedDecodedFrame(const uint64_t aPictureId) override;
@@ -39,46 +54,6 @@ public:
void Error(GMPErr aErr) override;
void Terminated() override;
void SetLastStreamOffset(int64_t aStreamOffset) {
mLastStreamOffset = aStreamOffset;
}
private:
MediaDataDecoderCallbackProxy* mCallback;
int64_t mLastStreamOffset;
VideoInfo mVideoInfo;
RefPtr<layers::ImageContainer> mImageContainer;
};
struct GMPVideoDecoderParams {
explicit GMPVideoDecoderParams(const CreateDecoderParams& aParams);
GMPVideoDecoderParams& WithCallback(MediaDataDecoderProxy* aWrapper);
GMPVideoDecoderParams& WithAdapter(VideoCallbackAdapter* aAdapter);
const VideoInfo& mConfig;
TaskQueue* mTaskQueue;
MediaDataDecoderCallbackProxy* mCallback;
VideoCallbackAdapter* mAdapter;
layers::ImageContainer* mImageContainer;
layers::LayersBackend mLayersBackend;
RefPtr<GMPCrashHelper> mCrashHelper;
};
class GMPVideoDecoder : public MediaDataDecoder {
public:
explicit GMPVideoDecoder(const GMPVideoDecoderParams& aParams);
RefPtr<InitPromise> Init() override;
void Input(MediaRawData* aSample) override;
void Flush() override;
void Drain() override;
void Shutdown() override;
const char* GetDescriptionName() const override
{
return "GMP video decoder";
}
protected:
virtual void InitTags(nsTArray<nsCString>& aTags);
virtual nsCString GetNodeId();
@@ -107,14 +82,20 @@ private:
void GMPInitDone(GMPVideoDecoderProxy* aGMP, GMPVideoHost* aHost);
const VideoInfo mConfig;
MediaDataDecoderCallbackProxy* mCallback;
nsCOMPtr<mozIGeckoMediaPluginService> mMPS;
GMPVideoDecoderProxy* mGMP;
GMPVideoHost* mHost;
nsAutoPtr<VideoCallbackAdapter> mAdapter;
bool mConvertNALUnitLengths;
MozPromiseHolder<InitPromise> mInitPromise;
RefPtr<GMPCrashHelper> mCrashHelper;
int64_t mLastStreamOffset = 0;
RefPtr<layers::ImageContainer> mImageContainer;
MozPromiseHolder<DecodePromise> mDecodePromise;
MozPromiseHolder<DecodePromise> mDrainPromise;
MozPromiseHolder<FlushPromise> mFlushPromise;
DecodedData mDecodedData;
};
} // namespace mozilla

View file

@@ -6,85 +6,68 @@
#include "MediaDataDecoderProxy.h"
#include "MediaData.h"
#include "mozilla/SyncRunnable.h"
namespace mozilla {
void
MediaDataDecoderCallbackProxy::Error(const MediaResult& aError)
{
mProxyCallback->Error(aError);
}
void
MediaDataDecoderCallbackProxy::FlushComplete()
{
mProxyDecoder->FlushComplete();
}
RefPtr<MediaDataDecoder::InitPromise>
MediaDataDecoderProxy::InternalInit()
{
return mProxyDecoder->Init();
}
RefPtr<MediaDataDecoder::InitPromise>
MediaDataDecoderProxy::Init()
{
MOZ_ASSERT(!mIsShutdown);
return InvokeAsync(mProxyThread, this, __func__,
&MediaDataDecoderProxy::InternalInit);
RefPtr<MediaDataDecoderProxy> self = this;
return InvokeAsync(mProxyThread, __func__,
[self, this]() { return mProxyDecoder->Init(); });
}
void
MediaDataDecoderProxy::Input(MediaRawData* aSample)
RefPtr<MediaDataDecoder::DecodePromise>
MediaDataDecoderProxy::Decode(MediaRawData* aSample)
{
MOZ_ASSERT(!IsOnProxyThread());
MOZ_ASSERT(!mIsShutdown);
nsCOMPtr<nsIRunnable> task(new InputTask(mProxyDecoder, aSample));
mProxyThread->Dispatch(task.forget());
RefPtr<MediaDataDecoderProxy> self = this;
RefPtr<MediaRawData> sample = aSample;
return InvokeAsync(mProxyThread, __func__, [self, this, sample]() {
return mProxyDecoder->Decode(sample);
});
}
void
RefPtr<MediaDataDecoder::FlushPromise>
MediaDataDecoderProxy::Flush()
{
MOZ_ASSERT(!IsOnProxyThread());
MOZ_ASSERT(!mIsShutdown);
mFlushComplete.Set(false);
mProxyThread->Dispatch(NewRunnableMethod(mProxyDecoder, &MediaDataDecoder::Flush));
mFlushComplete.WaitUntil(true);
RefPtr<MediaDataDecoderProxy> self = this;
return InvokeAsync(mProxyThread, __func__,
[self, this]() { return mProxyDecoder->Flush(); });
}
void
RefPtr<MediaDataDecoder::DecodePromise>
MediaDataDecoderProxy::Drain()
{
MOZ_ASSERT(!IsOnProxyThread());
MOZ_ASSERT(!mIsShutdown);
mProxyThread->Dispatch(NewRunnableMethod(mProxyDecoder, &MediaDataDecoder::Drain));
RefPtr<MediaDataDecoderProxy> self = this;
return InvokeAsync(mProxyThread, __func__,
[self, this]() { return mProxyDecoder->Drain(); });
}
void
RefPtr<ShutdownPromise>
MediaDataDecoderProxy::Shutdown()
{
MOZ_ASSERT(!IsOnProxyThread());
// Note that this *may* be called from the proxy thread also.
MOZ_ASSERT(!mIsShutdown);
#if defined(DEBUG)
mIsShutdown = true;
#endif
mProxyThread->AsEventTarget()->Dispatch(NewRunnableMethod(mProxyDecoder,
&MediaDataDecoder::Shutdown),
NS_DISPATCH_SYNC);
}
void
MediaDataDecoderProxy::FlushComplete()
{
mFlushComplete.Set(true);
RefPtr<MediaDataDecoderProxy> self = this;
return InvokeAsync(mProxyThread, __func__,
[self, this]() { return mProxyDecoder->Shutdown(); });
}
} // namespace mozilla
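
All five entry points now share one InvokeAsync shape. The `self` capture is what keeps the proxy alive until the lambda has run on mProxyThread; `this` is captured as well only so the body can name members directly, and the MozPromise returned by the proxied call is chained straight back to the caller:

  RefPtr<MediaDataDecoderProxy> self = this;
  return InvokeAsync(mProxyThread, __func__,
                     [self, this]() { return mProxyDecoder->Flush(); });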

View file

@@ -8,6 +8,7 @@
#define MediaDataDecoderProxy_h_
#include "PlatformDecoderModule.h"
#include "mozilla/Atomics.h"
#include "mozilla/RefPtr.h"
#include "nsThreadUtils.h"
#include "nscore.h"
@@ -15,120 +16,17 @@
namespace mozilla {
class InputTask : public Runnable {
class MediaDataDecoderProxy : public MediaDataDecoder
{
public:
InputTask(MediaDataDecoder* aDecoder,
MediaRawData* aSample)
: mDecoder(aDecoder)
, mSample(aSample)
{}
NS_IMETHOD Run() override {
mDecoder->Input(mSample);
return NS_OK;
}
private:
RefPtr<MediaDataDecoder> mDecoder;
RefPtr<MediaRawData> mSample;
};
template<typename T>
class Condition {
public:
explicit Condition(T aValue)
: mMonitor("Condition")
, mCondition(aValue)
{}
void Set(T aValue) {
MonitorAutoLock mon(mMonitor);
mCondition = aValue;
mon.NotifyAll();
}
void WaitUntil(T aValue) {
MonitorAutoLock mon(mMonitor);
while (mCondition != aValue) {
mon.Wait();
}
}
private:
Monitor mMonitor;
T mCondition;
};
class MediaDataDecoderProxy;
class MediaDataDecoderCallbackProxy : public MediaDataDecoderCallback {
public:
MediaDataDecoderCallbackProxy(MediaDataDecoderProxy* aProxyDecoder,
MediaDataDecoderCallback* aCallback)
: mProxyDecoder(aProxyDecoder)
, mProxyCallback(aCallback)
{
}
void Output(MediaData* aData) override {
mProxyCallback->Output(aData);
}
void Error(const MediaResult& aError) override;
void InputExhausted() override {
mProxyCallback->InputExhausted();
}
void DrainComplete() override {
mProxyCallback->DrainComplete();
}
void ReleaseMediaResources() override {
mProxyCallback->ReleaseMediaResources();
}
void FlushComplete();
bool OnReaderTaskQueue() override
{
return mProxyCallback->OnReaderTaskQueue();
}
void WaitingForKey() override
{
mProxyCallback->WaitingForKey();
}
private:
MediaDataDecoderProxy* mProxyDecoder;
MediaDataDecoderCallback* mProxyCallback;
};
class MediaDataDecoderProxy : public MediaDataDecoder {
public:
MediaDataDecoderProxy(already_AddRefed<AbstractThread> aProxyThread,
MediaDataDecoderCallback* aCallback)
explicit MediaDataDecoderProxy(already_AddRefed<AbstractThread> aProxyThread)
: mProxyThread(aProxyThread)
, mProxyCallback(this, aCallback)
, mFlushComplete(false)
#if defined(DEBUG)
, mIsShutdown(false)
#endif
{
}
// Ideally, this would return a regular MediaDataDecoderCallback pointer
// to retain the clean abstraction, but until MediaDataDecoderCallback
// supports the FlushComplete interface, this will have to do. When MDDC
// supports FlushComplete, this, the GMP*Decoders, and the
// *CallbackAdapters can be reverted to accepting a regular
// MediaDataDecoderCallback pointer.
MediaDataDecoderCallbackProxy* Callback()
{
return &mProxyCallback;
}
void SetProxyTarget(MediaDataDecoder* aProxyDecoder)
{
MOZ_ASSERT(aProxyDecoder);
@@ -136,43 +34,33 @@ public:
}
// These are called from the decoder thread pool.
// Init and Shutdown run synchronously on the proxy thread; all others are
// asynchronous and responded to via the MediaDataDecoderCallback.
// Note: the nsresults returned by the proxied decoder are lost.
// Shutdown runs synchronously on the proxy thread; all others are
// asynchronous.
RefPtr<InitPromise> Init() override;
void Input(MediaRawData* aSample) override;
void Flush() override;
void Drain() override;
void Shutdown() override;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
RefPtr<DecodePromise> Drain() override;
RefPtr<FlushPromise> Flush() override;
RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "GMP proxy data decoder";
}
// Called by MediaDataDecoderCallbackProxy.
void FlushComplete();
private:
RefPtr<InitPromise> InternalInit();
#ifdef DEBUG
bool IsOnProxyThread() {
bool IsOnProxyThread()
{
return mProxyThread && mProxyThread->IsCurrentThreadIn();
}
#endif
friend class InputTask;
friend class InitTask;
RefPtr<MediaDataDecoder> mProxyDecoder;
RefPtr<AbstractThread> mProxyThread;
MediaDataDecoderCallbackProxy mProxyCallback;
Condition<bool> mFlushComplete;
#if defined(DEBUG)
bool mIsShutdown;
Atomic<bool> mIsShutdown;
#endif
};
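
The removed Condition<T>/mFlushComplete pair existed only so Flush() could block its caller; with FlushPromise the caller chains instead of waiting. A sketch of the consuming side (`proxy`, `taskQueue` and the lambda bodies are illustrative):

  proxy->Flush()->Then(taskQueue, __func__,
                       [](bool) { /* flushed */ },
                       [](const MediaResult& aError) { /* flush failed */ });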

View file

@@ -53,7 +53,8 @@ GetFeatureStatus(int32_t aFeature)
nsCOMPtr<nsIGfxInfo> gfxInfo = services::GetGfxInfo();
int32_t status = nsIGfxInfo::FEATURE_STATUS_UNKNOWN;
nsCString discardFailureId;
if (!gfxInfo || NS_FAILED(gfxInfo->GetFeatureStatus(aFeature, discardFailureId, &status))) {
if (!gfxInfo || NS_FAILED(gfxInfo->GetFeatureStatus(
aFeature, discardFailureId, &status))) {
return false;
}
return status == nsIGfxInfo::FEATURE_STATUS_OK;
@@ -72,8 +73,8 @@ GetCryptoInfoFromSample(const MediaRawData* aSample)
nsresult rv = CryptoInfo::New(&cryptoInfo);
NS_ENSURE_SUCCESS(rv, nullptr);
uint32_t numSubSamples =
std::min<uint32_t>(cryptoObj.mPlainSizes.Length(), cryptoObj.mEncryptedSizes.Length());
uint32_t numSubSamples = std::min<uint32_t>(
cryptoObj.mPlainSizes.Length(), cryptoObj.mEncryptedSizes.Length());
uint32_t totalSubSamplesSize = 0;
for (auto& size : cryptoObj.mEncryptedSizes) {
@@ -105,19 +106,16 @@ GetCryptoInfoFromSample(const MediaRawData* aSample)
reinterpret_cast<int32_t*>(&plainSizes[0]),
plainSizes.Length());
auto numBytesOfEncryptedData =
mozilla::jni::IntArray::New(reinterpret_cast<const int32_t*>(&cryptoObj.mEncryptedSizes[0]),
cryptoObj.mEncryptedSizes.Length());
auto numBytesOfEncryptedData = mozilla::jni::IntArray::New(
reinterpret_cast<const int32_t*>(&cryptoObj.mEncryptedSizes[0]),
cryptoObj.mEncryptedSizes.Length());
auto iv = mozilla::jni::ByteArray::New(reinterpret_cast<int8_t*>(&tempIV[0]),
tempIV.Length());
auto keyId = mozilla::jni::ByteArray::New(reinterpret_cast<const int8_t*>(&cryptoObj.mKeyId[0]),
cryptoObj.mKeyId.Length());
cryptoInfo->Set(numSubSamples,
numBytesOfPlainData,
numBytesOfEncryptedData,
keyId,
iv,
MediaCodec::CRYPTO_MODE_AES_CTR);
tempIV.Length());
auto keyId = mozilla::jni::ByteArray::New(
reinterpret_cast<const int8_t*>(&cryptoObj.mKeyId[0]),
cryptoObj.mKeyId.Length());
cryptoInfo->Set(numSubSamples, numBytesOfPlainData, numBytesOfEncryptedData,
keyId, iv, MediaCodec::CRYPTO_MODE_AES_CTR);
return cryptoInfo;
}
@@ -168,7 +166,7 @@ AndroidDecoderModule::SupportsMimeType(const nsACString& aMimeType,
}
return java::HardwareCodecCapabilityUtils::FindDecoderCodecInfoForMimeType(
nsCString(TranslateMimeType(aMimeType)));
nsCString(TranslateMimeType(aMimeType)));
}
already_AddRefed<MediaDataDecoder>
@@ -195,21 +193,13 @@ AndroidDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
drmStubId = mProxy->GetMediaDrmStubId();
}
RefPtr<MediaDataDecoder> decoder = MediaPrefs::PDMAndroidRemoteCodecEnabled() ?
RemoteDataDecoder::CreateVideoDecoder(config,
format,
aParams.mCallback,
aParams.mImageContainer,
drmStubId,
mProxy,
aParams.mTaskQueue) :
MediaCodecDataDecoder::CreateVideoDecoder(config,
format,
aParams.mCallback,
aParams.mImageContainer,
drmStubId,
mProxy,
aParams.mTaskQueue);
RefPtr<MediaDataDecoder> decoder =
MediaPrefs::PDMAndroidRemoteCodecEnabled()
? RemoteDataDecoder::CreateVideoDecoder(
config, format, aParams.mImageContainer, drmStubId, mProxy,
aParams.mTaskQueue)
: MediaCodecDataDecoder::CreateVideoDecoder(
config, format, aParams.mImageContainer, drmStubId, mProxy);
return decoder.forget();
}
@@ -237,19 +227,12 @@ AndroidDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
if (mProxy) {
drmStubId = mProxy->GetMediaDrmStubId();
}
RefPtr<MediaDataDecoder> decoder = MediaPrefs::PDMAndroidRemoteCodecEnabled() ?
RemoteDataDecoder::CreateAudioDecoder(config,
format,
aParams.mCallback,
drmStubId,
mProxy,
aParams.mTaskQueue) :
MediaCodecDataDecoder::CreateAudioDecoder(config,
format,
aParams.mCallback,
drmStubId,
mProxy,
aParams.mTaskQueue);
RefPtr<MediaDataDecoder> decoder =
MediaPrefs::PDMAndroidRemoteCodecEnabled()
? RemoteDataDecoder::CreateAudioDecoder(config, format, drmStubId, mProxy,
aParams.mTaskQueue)
: MediaCodecDataDecoder::CreateAudioDecoder(config, format, drmStubId,
mProxy);
return decoder.forget();
}

View file

@@ -10,7 +10,8 @@
namespace mozilla {
class AndroidDecoderModule : public PlatformDecoderModule {
class AndroidDecoderModule : public PlatformDecoderModule
{
public:
already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const CreateDecoderParams& aParams) override;

View file

@@ -34,13 +34,6 @@ using media::TimeUnit;
namespace mozilla {
#define INVOKE_CALLBACK(Func, ...) \
if (mCallback) { \
mCallback->Func(__VA_ARGS__); \
} else { \
NS_WARNING("Callback not set"); \
}
static MediaCodec::LocalRef
CreateDecoder(const nsACString& aMimeType)
{
@@ -55,11 +48,10 @@ class VideoDataDecoder : public MediaCodecDataDecoder
public:
VideoDataDecoder(const VideoInfo& aConfig,
MediaFormat::Param aFormat,
MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer,
const nsString& aDrmStubId)
: MediaCodecDataDecoder(MediaData::Type::VIDEO_DATA, aConfig.mMimeType,
aFormat, aCallback, aDrmStubId)
aFormat, aDrmStubId)
, mImageContainer(aImageContainer)
, mConfig(aConfig)
{
@@ -119,11 +111,18 @@ public:
gfx::IntRect(0, 0,
mConfig.mDisplay.width,
mConfig.mDisplay.height));
INVOKE_CALLBACK(Output, v);
if (!v) {
return NS_ERROR_OUT_OF_MEMORY;
}
MonitorAutoLock mon(mMonitor);
mDecodedData.AppendElement(Move(v));
return NS_OK;
}
bool SupportDecoderRecycling() const override { return mIsCodecSupportAdaptivePlayback; }
bool SupportDecoderRecycling() const override
{
return mIsCodecSupportAdaptivePlayback;
}
protected:
layers::ImageContainer* mImageContainer;
@@ -131,56 +130,13 @@ protected:
RefPtr<AndroidSurfaceTexture> mSurfaceTexture;
};
class EMEVideoDataDecoder : public VideoDataDecoder {
public:
EMEVideoDataDecoder(const VideoInfo& aConfig,
MediaFormat::Param aFormat,
MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer,
const nsString& aDrmStubId,
CDMProxy* aProxy,
TaskQueue* aTaskQueue)
: VideoDataDecoder(aConfig, aFormat, aCallback, aImageContainer, aDrmStubId)
, mSamplesWaitingForKey(new SamplesWaitingForKey(this, aCallback,
aTaskQueue, aProxy))
{
}
void Input(MediaRawData* aSample) override;
void Shutdown() override;
private:
RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
};
void
EMEVideoDataDecoder::Input(MediaRawData* aSample)
{
if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
return;
}
VideoDataDecoder::Input(aSample);
}
void
EMEVideoDataDecoder::Shutdown()
{
VideoDataDecoder::Shutdown();
mSamplesWaitingForKey->BreakCycles();
mSamplesWaitingForKey = nullptr;
}
class AudioDataDecoder : public MediaCodecDataDecoder
{
public:
AudioDataDecoder(const AudioInfo& aConfig, MediaFormat::Param aFormat,
MediaDataDecoderCallback* aCallback,
const nsString& aDrmStubId)
: MediaCodecDataDecoder(MediaData::Type::AUDIO_DATA, aConfig.mMimeType,
aFormat, aCallback, aDrmStubId)
aFormat, aDrmStubId)
{
JNIEnv* const env = jni::GetEnvForThread();
@@ -203,7 +159,7 @@ public:
}
nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
MediaFormat::Param aFormat, const TimeUnit& aDuration)
MediaFormat::Param aFormat, const TimeUnit& aDuration) override
{
// The output on Android is always 16-bit signed
nsresult rv;
@@ -250,104 +206,59 @@ public:
Move(audio),
numChannels,
sampleRate);
INVOKE_CALLBACK(Output, data);
MonitorAutoLock mon(mMonitor);
mDecodedData.AppendElement(Move(data));
return NS_OK;
}
};
class EMEAudioDataDecoder : public AudioDataDecoder {
public:
EMEAudioDataDecoder(const AudioInfo& aConfig, MediaFormat::Param aFormat,
MediaDataDecoderCallback* aCallback, const nsString& aDrmStubId,
CDMProxy* aProxy, TaskQueue* aTaskQueue)
: AudioDataDecoder(aConfig, aFormat, aCallback, aDrmStubId)
, mSamplesWaitingForKey(new SamplesWaitingForKey(this, aCallback,
aTaskQueue, aProxy))
{
}
void Input(MediaRawData* aSample) override;
void Shutdown() override;
private:
RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
};
void
EMEAudioDataDecoder::Input(MediaRawData* aSample)
{
if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
return;
}
AudioDataDecoder::Input(aSample);
}
void
EMEAudioDataDecoder::Shutdown()
{
AudioDataDecoder::Shutdown();
mSamplesWaitingForKey->BreakCycles();
mSamplesWaitingForKey = nullptr;
}
MediaDataDecoder*
already_AddRefed<MediaDataDecoder>
MediaCodecDataDecoder::CreateAudioDecoder(const AudioInfo& aConfig,
java::sdk::MediaFormat::Param aFormat,
MediaDataDecoderCallback* aCallback,
const nsString& aDrmStubId,
CDMProxy* aProxy,
TaskQueue* aTaskQueue)
CDMProxy* aProxy)
{
RefPtr<MediaDataDecoder> decoder;
if (!aProxy) {
return new AudioDataDecoder(aConfig, aFormat, aCallback, aDrmStubId);
decoder = new AudioDataDecoder(aConfig, aFormat, aDrmStubId);
} else {
return new EMEAudioDataDecoder(aConfig,
aFormat,
aCallback,
aDrmStubId,
aProxy,
aTaskQueue);
// TODO in bug 1334061.
}
return decoder.forget();
}
MediaDataDecoder*
already_AddRefed<MediaDataDecoder>
MediaCodecDataDecoder::CreateVideoDecoder(const VideoInfo& aConfig,
java::sdk::MediaFormat::Param aFormat,
MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer,
const nsString& aDrmStubId,
CDMProxy* aProxy,
TaskQueue* aTaskQueue)
CDMProxy* aProxy)
{
RefPtr<MediaDataDecoder> decoder;
if (!aProxy) {
return new VideoDataDecoder(aConfig, aFormat, aCallback, aImageContainer, aDrmStubId);
decoder = new VideoDataDecoder(aConfig, aFormat, aImageContainer, aDrmStubId);
} else {
return new EMEVideoDataDecoder(aConfig,
aFormat,
aCallback,
aImageContainer,
aDrmStubId,
aProxy,
aTaskQueue);
// TODO in bug 1334061.
}
return decoder.forget();
}
MediaCodecDataDecoder::MediaCodecDataDecoder(MediaData::Type aType,
const nsACString& aMimeType,
MediaFormat::Param aFormat,
MediaDataDecoderCallback* aCallback,
const nsString& aDrmStubId)
: mType(aType)
, mMimeType(aMimeType)
, mFormat(aFormat)
, mCallback(aCallback)
, mInputBuffers(nullptr)
, mOutputBuffers(nullptr)
, mError(false)
, mMonitor("MediaCodecDataDecoder::mMonitor")
, mState(ModuleState::kDecoding)
, mDrmStubId(aDrmStubId)
{
mDecodePromise.SetMonitor(&mMonitor);
mDrainPromise.SetMonitor(&mMonitor);
}
MediaCodecDataDecoder::~MediaCodecDataDecoder()
@@ -364,10 +275,9 @@ MediaCodecDataDecoder::Init()
(mType == MediaData::AUDIO_DATA ? TrackInfo::TrackType::kAudioTrack
: TrackInfo::TrackType::kVideoTrack);
return NS_SUCCEEDED(rv) ?
InitPromise::CreateAndResolve(type, __func__) :
InitPromise::CreateAndReject(
NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
return NS_SUCCEEDED(rv) ? InitPromise::CreateAndResolve(type, __func__)
: InitPromise::CreateAndReject(
NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}
nsresult
@@ -376,8 +286,6 @@ MediaCodecDataDecoder::InitDecoder(Surface::Param aSurface)
mDecoder = CreateDecoder(mMimeType);
if (!mDecoder) {
INVOKE_CALLBACK(Error,
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__));
return NS_ERROR_FAILURE;
}
@@ -395,7 +303,8 @@ MediaCodecDataDecoder::InitDecoder(Surface::Param aSurface)
MediaCrypto::LocalRef crypto = MediaDrmProxy::GetMediaCrypto(mDrmStubId);
bool hascrypto = !!crypto;
LOG("Has(%d) MediaCrypto (%s)", hascrypto, NS_ConvertUTF16toUTF8(mDrmStubId).get());
LOG("Has(%d) MediaCrypto (%s)", hascrypto,
NS_ConvertUTF16toUTF8(mDrmStubId).get());
nsresult rv;
NS_ENSURE_SUCCESS(rv = mDecoder->Configure(mFormat, aSurface, crypto, 0), rv);
NS_ENSURE_SUCCESS(rv = mDecoder->Start(), rv);
@@ -403,7 +312,8 @@ MediaCodecDataDecoder::InitDecoder(Surface::Param aSurface)
NS_ENSURE_SUCCESS(rv = ResetInputBuffers(), rv);
NS_ENSURE_SUCCESS(rv = ResetOutputBuffers(), rv);
nsCOMPtr<nsIRunnable> r = NewRunnableMethod(this, &MediaCodecDataDecoder::DecoderLoop);
nsCOMPtr<nsIRunnable> r =
NewRunnableMethod(this, &MediaCodecDataDecoder::DecoderLoop);
rv = NS_NewNamedThread("MC Decoder", getter_AddRefs(mThread), r);
return rv;
@@ -412,15 +322,36 @@ MediaCodecDataDecoder::InitDecoder(Surface::Param aSurface)
// This is in usec, so that's 10ms.
static const int64_t kDecoderTimeout = 10000;
#define BREAK_ON_DECODER_ERROR() \
if (NS_FAILED(res)) { \
NS_WARNING("Exiting decoder loop due to exception"); \
if (mState == ModuleState::kDrainDecoder) { \
INVOKE_CALLBACK(DrainComplete); \
SetState(ModuleState::kDecoding); \
} \
INVOKE_CALLBACK(Error, MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)); \
break; \
#define BREAK_ON_DECODER_ERROR_LOCKED() \
if (NS_FAILED(res)) { \
mError = true; \
mMonitor.AssertCurrentThreadOwns(); \
NS_WARNING("Exiting decoder loop due to exception"); \
if (mState == ModuleState::kDrainDecoder) { \
mDrainPromise.RejectIfExists( \
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__), __func__); \
SetState(ModuleState::kDecoding); \
break; \
} \
mDecodePromise.RejectIfExists( \
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__), __func__); \
break; \
}
#define BREAK_ON_DECODER_ERROR() \
if (NS_FAILED(res)) { \
mError = true; \
MonitorAutoLock mon(mMonitor); \
NS_WARNING("Exiting decoder loop due to exception"); \
if (mState == ModuleState::kDrainDecoder) { \
mDrainPromise.RejectIfExists( \
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__), __func__); \
SetState(ModuleState::kDecoding); \
break; \
} \
mDecodePromise.RejectIfExists( \
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__), __func__); \
break; \
}
nsresult
@@ -451,9 +382,10 @@ MediaCodecDataDecoder::WaitForInput()
{
MonitorAutoLock lock(mMonitor);
while (mState == ModuleState::kDecoding && mQueue.empty()) {
// Signal that we require more input.
INVOKE_CALLBACK(InputExhausted);
while (mState == ModuleState::kDecoding && mQueue.empty()) {
// We're done processing the current sample.
mDecodePromise.ResolveIfExists(mDecodedData, __func__);
mDecodedData.Clear();
lock.Wait();
}
@@ -562,9 +494,10 @@ MediaCodecDataDecoder::HandleEOS(int32_t aOutputStatus)
if (mState == ModuleState::kDrainWaitEOS) {
SetState(ModuleState::kDecoding);
mMonitor.Notify();
INVOKE_CALLBACK(DrainComplete);
mDrainPromise.ResolveIfExists(mDecodedData, __func__);
mDecodedData.Clear();
mMonitor.Notify();
}
mDecoder->ReleaseOutputBuffer(aOutputStatus, false);
@@ -606,9 +539,7 @@ MediaCodecDataDecoder::ProcessOutput(
// The Surface will be updated at this point (for video).
mDecoder->ReleaseOutputBuffer(aStatus, true);
PostOutput(aInfo, aFormat, duration.value());
return NS_OK;
return PostOutput(aInfo, aFormat, duration.value());
}
void
@@ -627,7 +558,7 @@ MediaCodecDataDecoder::DecoderLoop()
if (mState == ModuleState::kDrainDecoder) {
MOZ_ASSERT(!sample, "Shouldn't have a sample when pushing EOF frame");
res = QueueEOS();
BREAK_ON_DECODER_ERROR();
BREAK_ON_DECODER_ERROR_LOCKED();
}
}
@@ -656,8 +587,6 @@ MediaCodecDataDecoder::DecoderLoop()
BREAK_ON_DECODER_ERROR();
if (outputStatus == MediaCodec::INFO_TRY_AGAIN_LATER) {
// We might want to call mCallback->InputExhausted() here, but there seem
// to be some possible bad interactions with the threading.
} else if (outputStatus == MediaCodec::INFO_OUTPUT_BUFFERS_CHANGED) {
res = ResetOutputBuffers();
BREAK_ON_DECODER_ERROR();
@@ -666,9 +595,13 @@ MediaCodecDataDecoder::DecoderLoop()
BREAK_ON_DECODER_ERROR();
} else if (outputStatus < 0) {
NS_WARNING("Unknown error from decoder!");
INVOKE_CALLBACK(Error,
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
__func__));
{
const auto result =
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__);
MonitorAutoLock mon(mMonitor);
mDecodePromise.RejectIfExists(result, __func__);
mDrainPromise.RejectIfExists(result, __func__);
}
// Don't break here just in case it's recoverable. If it's not, other
// stuff will fail later and we'll bail out.
} else {
@@ -715,6 +648,8 @@ MediaCodecDataDecoder::ModuleStateStr(ModuleState aState) {
bool
MediaCodecDataDecoder::SetState(ModuleState aState)
{
mMonitor.AssertCurrentThreadOwns();
bool ok = true;
if (mState == ModuleState::kShutdown) {
@@ -744,14 +679,21 @@ MediaCodecDataDecoder::ClearQueue()
mQueue.clear();
mDurations.clear();
mDecodedData.Clear();
}
void
MediaCodecDataDecoder::Input(MediaRawData* aSample)
RefPtr<MediaDataDecoder::DecodePromise>
MediaCodecDataDecoder::Decode(MediaRawData* aSample)
{
if (mError) {
return DecodePromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__);
}
MonitorAutoLock lock(mMonitor);
RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
mQueue.push_back(aSample);
lock.NotifyAll();
return p;
}
nsresult
@@ -766,35 +708,41 @@ MediaCodecDataDecoder::ResetOutputBuffers()
return mDecoder->GetOutputBuffers(ReturnTo(&mOutputBuffers));
}
void
RefPtr<MediaDataDecoder::FlushPromise>
MediaCodecDataDecoder::Flush()
{
MonitorAutoLock lock(mMonitor);
mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
mDrainPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
if (!SetState(ModuleState::kFlushing)) {
return;
return FlushPromise::CreateAndResolve(true, __func__);
}
lock.Notify();
while (mState == ModuleState::kFlushing) {
lock.Wait();
}
return FlushPromise::CreateAndResolve(true, __func__);
}
void
RefPtr<MediaDataDecoder::DecodePromise>
MediaCodecDataDecoder::Drain()
{
MonitorAutoLock lock(mMonitor);
if (mState == ModuleState::kDrainDecoder ||
mState == ModuleState::kDrainQueue) {
return;
if (mError) {
return DecodePromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__);
}
MonitorAutoLock lock(mMonitor);
RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
MOZ_ASSERT(mState != ModuleState::kDrainDecoder
&& mState != ModuleState::kDrainQueue, "Already draining");
SetState(ModuleState::kDrainQueue);
lock.Notify();
return p;
}
void
RefPtr<ShutdownPromise>
MediaCodecDataDecoder::Shutdown()
{
MonitorAutoLock lock(mMonitor);
@@ -816,6 +764,8 @@ MediaCodecDataDecoder::Shutdown()
mDecoder->Release();
mDecoder = nullptr;
}
return ShutdownPromise::CreateAndResolve(true, __func__);
}
} // mozilla
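
A note on the locking scheme introduced here: the promise holders are settled from both the caller side and the "MC Decoder" thread, so they are bound to mMonitor (SetMonitor in the constructor) and every Ensure/Resolve/Reject above runs with the monitor held. Decode() reduces to this shape:

  MonitorAutoLock lock(mMonitor);
  RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
  mQueue.push_back(aSample);
  lock.NotifyAll();  // wake DecoderLoop()
  return p;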

View file

@@ -10,6 +10,7 @@
#include "MediaCodec.h"
#include "SurfaceTexture.h"
#include "TimeUnits.h"
#include "mozilla/Atomics.h"
#include "mozilla/Monitor.h"
#include "mozilla/Maybe.h"
@@ -19,37 +20,33 @@ namespace mozilla {
typedef std::deque<RefPtr<MediaRawData>> SampleQueue;
class MediaCodecDataDecoder : public MediaDataDecoder {
class MediaCodecDataDecoder : public MediaDataDecoder
{
public:
static MediaDataDecoder* CreateAudioDecoder(const AudioInfo& aConfig,
java::sdk::MediaFormat::Param aFormat,
MediaDataDecoderCallback* aCallback,
const nsString& aDrmStubId,
CDMProxy* aProxy,
TaskQueue* aTaskQueue);
static already_AddRefed<MediaDataDecoder> CreateAudioDecoder(
const AudioInfo& aConfig, java::sdk::MediaFormat::Param aFormat,
const nsString& aDrmStubId, CDMProxy* aProxy);
static MediaDataDecoder* CreateVideoDecoder(const VideoInfo& aConfig,
java::sdk::MediaFormat::Param aFormat,
MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer,
const nsString& aDrmStubId,
CDMProxy* aProxy,
TaskQueue* aTaskQueue);
static already_AddRefed<MediaDataDecoder> CreateVideoDecoder(
const VideoInfo& aConfig, java::sdk::MediaFormat::Param aFormat,
layers::ImageContainer* aImageContainer, const nsString& aDrmStubId,
CDMProxy* aProxy);
virtual ~MediaCodecDataDecoder();
~MediaCodecDataDecoder();
RefPtr<MediaDataDecoder::InitPromise> Init() override;
void Flush() override;
void Drain() override;
void Shutdown() override;
void Input(MediaRawData* aSample) override;
RefPtr<InitPromise> Init() override;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
RefPtr<DecodePromise> Drain() override;
RefPtr<FlushPromise> Flush() override;
RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "Android MediaCodec decoder";
}
protected:
enum class ModuleState : uint8_t {
enum class ModuleState : uint8_t
{
kDecoding = 0,
kFlushing,
kDrainQueue,
@@ -64,7 +61,6 @@ protected:
MediaCodecDataDecoder(MediaData::Type aType,
const nsACString& aMimeType,
java::sdk::MediaFormat::Param aFormat,
MediaDataDecoderCallback* aCallback,
const nsString& aDrmStubId);
static const char* ModuleStateStr(ModuleState aState);
@@ -99,6 +95,7 @@ protected:
java::sdk::MediaFormat::Param aFormat,
int32_t aStatus);
// Sets decoder state and returns whether the new state has become effective.
// Must hold the monitor.
bool SetState(ModuleState aState);
void DecoderLoop();
@@ -109,8 +106,6 @@ protected:
nsAutoCString mMimeType;
java::sdk::MediaFormat::GlobalRef mFormat;
MediaDataDecoderCallback* mCallback;
java::sdk::MediaCodec::GlobalRef mDecoder;
jni::ObjectArray::GlobalRef mInputBuffers;
@@ -118,6 +113,8 @@ protected:
nsCOMPtr<nsIThread> mThread;
Atomic<bool> mError;
// Only these members are protected by mMonitor.
Monitor mMonitor;
@@ -130,6 +127,10 @@ protected:
nsString mDrmStubId;
bool mIsCodecSupportAdaptivePlayback = false;
MozPromiseHolder<DecodePromise> mDecodePromise;
MozPromiseHolder<DecodePromise> mDrainPromise;
DecodedData mDecodedData;
};
} // namespace mozilla
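
Call shape after the signature change, sketched with illustrative arguments (`config`, `format`, `imageContainer`, `drmStubId`); the factories now hand out explicit strong references instead of raw MediaDataDecoder pointers:

  RefPtr<MediaDataDecoder> decoder =
    MediaCodecDataDecoder::CreateVideoDecoder(config, format, imageContainer,
                                              drmStubId, /* aProxy */ nullptr);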

View file

@@ -19,9 +19,9 @@
#include "prlog.h"
#include <deque>
#include <jni.h>
#include <deque>
#undef LOG
#define LOG(arg, ...) MOZ_LOG(sAndroidDecoderModuleLog, \
@@ -43,18 +43,16 @@ public:
typedef CodecProxy::NativeCallbacks::Natives<JavaCallbacksSupport> Base;
using Base::AttachNative;
JavaCallbacksSupport(MediaDataDecoderCallback* aDecoderCallback)
: mDecoderCallback(aDecoderCallback)
{
MOZ_ASSERT(aDecoderCallback);
}
JavaCallbacksSupport() : mCanceled(false) { }
virtual ~JavaCallbacksSupport() {}
virtual ~JavaCallbacksSupport() { }
virtual void HandleInputExhausted() = 0;
void OnInputExhausted()
{
if (mDecoderCallback) {
mDecoderCallback->InputExhausted();
if (!mCanceled) {
HandleInputExhausted();
}
}
@@ -62,26 +60,28 @@ public:
void OnOutput(jni::Object::Param aSample)
{
if (mDecoderCallback) {
if (!mCanceled) {
HandleOutput(Sample::Ref::From(aSample));
}
}
virtual void HandleOutputFormatChanged(MediaFormat::Param aFormat) {};
virtual void HandleOutputFormatChanged(MediaFormat::Param aFormat) { };
void OnOutputFormatChanged(jni::Object::Param aFormat)
{
if (mDecoderCallback) {
if (!mCanceled) {
HandleOutputFormatChanged(MediaFormat::Ref::From(aFormat));
}
}
virtual void HandleError(const MediaResult& aError) = 0;
void OnError(bool aIsFatal)
{
if (mDecoderCallback) {
mDecoderCallback->Error(aIsFatal ?
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__) :
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__));
if (!mCanceled) {
HandleError(
aIsFatal ? MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)
: MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__));
}
}
@@ -92,25 +92,27 @@ public:
void Cancel()
{
mDecoderCallback = nullptr;
mCanceled = true;
}
protected:
MediaDataDecoderCallback* mDecoderCallback;
private:
Atomic<bool> mCanceled;
};
class RemoteVideoDecoder : public RemoteDataDecoder
{
public:
// Hold an output buffer and render it to the surface when the frame is sent to compositor, or
// release it if not presented.
// Hold an output buffer and render it to the surface when the frame is sent
// to compositor, or release it if not presented.
class RenderOrReleaseOutput : public VideoData::Listener
{
public:
RenderOrReleaseOutput(java::CodecProxy::Param aCodec, java::Sample::Param aSample)
: mCodec(aCodec),
mSample(aSample)
{}
RenderOrReleaseOutput(java::CodecProxy::Param aCodec,
java::Sample::Param aSample)
: mCodec(aCodec)
, mSample(aSample)
{
}
~RenderOrReleaseOutput()
{
@@ -139,12 +141,12 @@ public:
class CallbacksSupport final : public JavaCallbacksSupport
{
public:
CallbacksSupport(RemoteVideoDecoder* aDecoder, MediaDataDecoderCallback* aCallback)
: JavaCallbacksSupport(aCallback)
, mDecoder(aDecoder)
{}
CallbacksSupport(RemoteVideoDecoder* aDecoder) : mDecoder(aDecoder) { }
virtual ~CallbacksSupport() {}
void HandleInputExhausted() override
{
mDecoder->InputExhausted();
}
void HandleOutput(Sample::Param aSample) override
{
@@ -157,50 +159,51 @@ public:
int32_t flags;
bool ok = NS_SUCCEEDED(info->Flags(&flags));
MOZ_ASSERT(ok);
int32_t offset;
ok |= NS_SUCCEEDED(info->Offset(&offset));
MOZ_ASSERT(ok);
ok &= NS_SUCCEEDED(info->Offset(&offset));
int64_t presentationTimeUs;
ok |= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
MOZ_ASSERT(ok);
ok &= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
int32_t size;
ok |= NS_SUCCEEDED(info->Size(&size));
MOZ_ASSERT(ok);
ok &= NS_SUCCEEDED(info->Size(&size));
NS_ENSURE_TRUE_VOID(ok);
if (!ok) {
HandleError(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("VideoCallBack::HandleOutput")));
return;
}
if (size > 0) {
RefPtr<layers::Image> img =
new SurfaceTextureImage(mDecoder->mSurfaceTexture.get(), mDecoder->mConfig.mDisplay,
gl::OriginPos::BottomLeft);
RefPtr<layers::Image> img = new SurfaceTextureImage(
mDecoder->mSurfaceTexture.get(), mDecoder->mConfig.mDisplay,
gl::OriginPos::BottomLeft);
RefPtr<VideoData> v =
VideoData::CreateFromImage(mDecoder->mConfig,
offset,
presentationTimeUs,
durationUs.value(),
img,
!!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME),
presentationTimeUs,
gfx::IntRect(0, 0,
mDecoder->mConfig.mDisplay.width,
mDecoder->mConfig.mDisplay.height));
RefPtr<VideoData> v = VideoData::CreateFromImage(
mDecoder->mConfig, offset, presentationTimeUs, durationUs.value(),
img, !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME),
presentationTimeUs,
gfx::IntRect(0, 0, mDecoder->mConfig.mDisplay.width,
mDecoder->mConfig.mDisplay.height));
UniquePtr<VideoData::Listener> listener(new RenderOrReleaseOutput(mDecoder->mJavaDecoder, aSample));
UniquePtr<VideoData::Listener> listener(
new RenderOrReleaseOutput(mDecoder->mJavaDecoder, aSample));
v->SetListener(Move(listener));
mDecoderCallback->Output(v);
mDecoder->Output(v);
}
if ((flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM) != 0) {
mDecoderCallback->DrainComplete();
mDecoder->DrainComplete();
}
}
void HandleError(const MediaResult& aError) override
{
mDecoder->Error(aError);
}
friend class RemoteDataDecoder;
private:
@@ -209,11 +212,10 @@ public:
RemoteVideoDecoder(const VideoInfo& aConfig,
MediaFormat::Param aFormat,
MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer,
const nsString& aDrmStubId)
const nsString& aDrmStubId, TaskQueue* aTaskQueue)
: RemoteDataDecoder(MediaData::Type::VIDEO_DATA, aConfig.mMimeType,
aFormat, aCallback, aDrmStubId)
aFormat, aDrmStubId, aTaskQueue)
, mImageContainer(aImageContainer)
, mConfig(aConfig)
{
@@ -224,59 +226,66 @@ public:
mSurfaceTexture = AndroidSurfaceTexture::Create();
if (!mSurfaceTexture) {
NS_WARNING("Failed to create SurfaceTexture for video decode\n");
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__);
}
if (!jni::IsFennec()) {
NS_WARNING("Remote decoding not supported in non-Fennec environment\n");
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__);
}
// Register native methods.
JavaCallbacksSupport::Init();
mJavaCallbacks = CodecProxy::NativeCallbacks::New();
JavaCallbacksSupport::AttachNative(mJavaCallbacks,
mozilla::MakeUnique<CallbacksSupport>(this, mCallback));
JavaCallbacksSupport::AttachNative(
mJavaCallbacks, mozilla::MakeUnique<CallbacksSupport>(this));
mJavaDecoder = CodecProxy::Create(mFormat,
mSurfaceTexture->JavaSurface(),
mJavaCallbacks,
mDrmStubId);
if (mJavaDecoder == nullptr) {
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__);
}
mIsCodecSupportAdaptivePlayback = mJavaDecoder->IsAdaptivePlaybackSupported();
mInputDurations.Clear();
mIsCodecSupportAdaptivePlayback =
mJavaDecoder->IsAdaptivePlaybackSupported();
return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
}
void Flush() override
RefPtr<MediaDataDecoder::FlushPromise> Flush() override
{
mInputDurations.Clear();
RemoteDataDecoder::Flush();
return RemoteDataDecoder::Flush();
}
void Drain() override
RefPtr<MediaDataDecoder::DecodePromise> Drain() override
{
RemoteDataDecoder::Drain();
mInputDurations.Put(0);
return RemoteDataDecoder::Drain();
}
void Input(MediaRawData* aSample) override
RefPtr<MediaDataDecoder::DecodePromise> Decode(MediaRawData* aSample) override
{
RemoteDataDecoder::Input(aSample);
mInputDurations.Put(aSample->mDuration);
return RemoteDataDecoder::Decode(aSample);
}
bool SupportDecoderRecycling() const override { return mIsCodecSupportAdaptivePlayback; }
bool SupportDecoderRecycling() const override
{
return mIsCodecSupportAdaptivePlayback;
}
private:
class DurationQueue {
class DurationQueue
{
public:
DurationQueue() : mMutex("Video duration queue") {}
DurationQueue() : mMutex("Video duration queue") { }
void Clear()
{
@@ -309,75 +318,34 @@ private:
};
layers::ImageContainer* mImageContainer;
const VideoInfo& mConfig;
const VideoInfo mConfig;
RefPtr<AndroidSurfaceTexture> mSurfaceTexture;
DurationQueue mInputDurations;
bool mIsCodecSupportAdaptivePlayback = false;
};
class RemoteEMEVideoDecoder : public RemoteVideoDecoder {
public:
RemoteEMEVideoDecoder(const VideoInfo& aConfig,
MediaFormat::Param aFormat,
MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer,
const nsString& aDrmStubId,
CDMProxy* aProxy,
TaskQueue* aTaskQueue)
: RemoteVideoDecoder(aConfig, aFormat, aCallback, aImageContainer, aDrmStubId)
, mSamplesWaitingForKey(new SamplesWaitingForKey(this, aCallback,
aTaskQueue, aProxy))
{
}
void Input(MediaRawData* aSample) override;
void Shutdown() override;
private:
RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
};
void
RemoteEMEVideoDecoder::Input(MediaRawData* aSample)
{
if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
return;
}
RemoteVideoDecoder::Input(aSample);
}
void
RemoteEMEVideoDecoder::Shutdown()
{
RemoteVideoDecoder::Shutdown();
mSamplesWaitingForKey->BreakCycles();
mSamplesWaitingForKey = nullptr;
}
class RemoteAudioDecoder : public RemoteDataDecoder
{
public:
RemoteAudioDecoder(const AudioInfo& aConfig,
MediaFormat::Param aFormat,
MediaDataDecoderCallback* aCallback,
const nsString& aDrmStubId)
const nsString& aDrmStubId, TaskQueue* aTaskQueue)
: RemoteDataDecoder(MediaData::Type::AUDIO_DATA, aConfig.mMimeType,
aFormat, aCallback, aDrmStubId)
aFormat, aDrmStubId, aTaskQueue)
, mConfig(aConfig)
{
JNIEnv* const env = jni::GetEnvForThread();
bool formatHasCSD = false;
NS_ENSURE_SUCCESS_VOID(aFormat->ContainsKey(NS_LITERAL_STRING("csd-0"), &formatHasCSD));
NS_ENSURE_SUCCESS_VOID(
aFormat->ContainsKey(NS_LITERAL_STRING("csd-0"), &formatHasCSD));
if (!formatHasCSD && aConfig.mCodecSpecificConfig->Length() >= 2) {
jni::ByteBuffer::LocalRef buffer(env);
buffer = jni::ByteBuffer::New(
aConfig.mCodecSpecificConfig->Elements(),
buffer = jni::ByteBuffer::New(aConfig.mCodecSpecificConfig->Elements(),
aConfig.mCodecSpecificConfig->Length());
NS_ENSURE_SUCCESS_VOID(aFormat->SetByteBuffer(NS_LITERAL_STRING("csd-0"),
buffer));
NS_ENSURE_SUCCESS_VOID(
aFormat->SetByteBuffer(NS_LITERAL_STRING("csd-0"), buffer));
}
}
@@ -387,12 +355,14 @@ public:
JavaCallbacksSupport::Init();
mJavaCallbacks = CodecProxy::NativeCallbacks::New();
JavaCallbacksSupport::AttachNative(mJavaCallbacks,
mozilla::MakeUnique<CallbacksSupport>(this, mCallback));
JavaCallbacksSupport::AttachNative(
mJavaCallbacks, mozilla::MakeUnique<CallbacksSupport>(this));
mJavaDecoder = CodecProxy::Create(mFormat, nullptr, mJavaCallbacks, mDrmStubId);
mJavaDecoder =
CodecProxy::Create(mFormat, nullptr, mJavaCallbacks, mDrmStubId);
if (mJavaDecoder == nullptr) {
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__);
}
return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
@@ -402,12 +372,12 @@ private:
class CallbacksSupport final : public JavaCallbacksSupport
{
public:
CallbacksSupport(RemoteAudioDecoder* aDecoder, MediaDataDecoderCallback* aCallback)
: JavaCallbacksSupport(aCallback)
, mDecoder(aDecoder)
{}
CallbacksSupport(RemoteAudioDecoder* aDecoder) : mDecoder(aDecoder) { }
virtual ~CallbacksSupport() {}
void HandleInputExhausted() override
{
mDecoder->InputExhausted();
}
void HandleOutput(Sample::Param aSample) override
{
@@ -415,21 +385,21 @@ private:
int32_t flags;
bool ok = NS_SUCCEEDED(info->Flags(&flags));
MOZ_ASSERT(ok);
int32_t offset;
ok |= NS_SUCCEEDED(info->Offset(&offset));
MOZ_ASSERT(ok);
ok &= NS_SUCCEEDED(info->Offset(&offset));
int64_t presentationTimeUs;
ok |= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
MOZ_ASSERT(ok);
ok &= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
int32_t size;
ok |= NS_SUCCEEDED(info->Size(&size));
MOZ_ASSERT(ok);
ok &= NS_SUCCEEDED(info->Size(&size));
NS_ENSURE_TRUE_VOID(ok);
if (!ok) {
HandleError(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("AudioCallBack::HandleOutput")));
return;
}
if (size > 0) {
#ifdef MOZ_SAMPLE_TYPE_S16
@@ -441,25 +411,24 @@ private:
const int32_t numFrames = numSamples / mOutputChannels;
AlignedAudioBuffer audio(numSamples);
if (!audio) {
mDecoder->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
return;
}
jni::ByteBuffer::LocalRef dest =
jni::ByteBuffer::New(audio.get(), size);
aSample->WriteToByteBuffer(dest);
RefPtr<AudioData> data = new AudioData(
0, presentationTimeUs,
FramesToUsecs(numFrames, mOutputSampleRate).value(), numFrames,
Move(audio), mOutputChannels, mOutputSampleRate);
mDecoder->Output(data);
}
if ((flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM) != 0) {
mDecoder->DrainComplete();
}
}
@ -468,13 +437,19 @@ private:
aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &mOutputChannels);
AudioConfig::ChannelLayout layout(mOutputChannels);
if (!layout.IsValid()) {
mDecoder->Error(MediaResult(
NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("Invalid channel layout:%d", mOutputChannels)));
return;
}
aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &mOutputSampleRate);
LOG("Audio output format changed: channels:%d sample rate:%d", mOutputChannels, mOutputSampleRate);
LOG("Audio output format changed: channels:%d sample rate:%d",
mOutputChannels, mOutputSampleRate);
}
void HandleError(const MediaResult& aError) override
{
mDecoder->Error(aError);
}
private:
@ -483,122 +458,98 @@ private:
int32_t mOutputSampleRate;
};
const AudioInfo mConfig;
};
already_AddRefed<MediaDataDecoder>
RemoteDataDecoder::CreateAudioDecoder(const AudioInfo& aConfig,
MediaFormat::Param aFormat,
const nsString& aDrmStubId,
CDMProxy* aProxy, TaskQueue* aTaskQueue)
{
RefPtr<MediaDataDecoder> decoder;
if (!aProxy) {
decoder = new RemoteAudioDecoder(aConfig, aFormat, aDrmStubId, aTaskQueue);
} else {
// TODO in bug 1334061.
}
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
RemoteDataDecoder::CreateVideoDecoder(const VideoInfo& aConfig,
MediaFormat::Param aFormat,
layers::ImageContainer* aImageContainer,
const nsString& aDrmStubId,
CDMProxy* aProxy, TaskQueue* aTaskQueue)
{
RefPtr<MediaDataDecoder> decoder;
if (!aProxy) {
decoder = new RemoteVideoDecoder(aConfig, aFormat, aImageContainer,
aDrmStubId, aTaskQueue);
} else {
// TODO in bug 1334061.
}
return decoder.forget();
}
RemoteDataDecoder::RemoteDataDecoder(MediaData::Type aType,
const nsACString& aMimeType,
MediaFormat::Param aFormat,
const nsString& aDrmStubId,
TaskQueue* aTaskQueue)
: mType(aType)
, mMimeType(aMimeType)
, mFormat(aFormat)
, mDrmStubId(aDrmStubId)
, mTaskQueue(aTaskQueue)
{
}
RefPtr<MediaDataDecoder::FlushPromise>
RemoteDataDecoder::Flush()
{
RefPtr<RemoteDataDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
mDrainPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
mJavaDecoder->Flush();
return FlushPromise::CreateAndResolve(true, __func__);
});
}
RefPtr<MediaDataDecoder::DecodePromise>
RemoteDataDecoder::Drain()
{
RefPtr<RemoteDataDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
BufferInfo::LocalRef bufferInfo;
nsresult rv = BufferInfo::New(&bufferInfo);
if (NS_FAILED(rv)) {
return DecodePromise::CreateAndReject(NS_ERROR_OUT_OF_MEMORY, __func__);
}
bufferInfo->Set(0, 0, -1, MediaCodec::BUFFER_FLAG_END_OF_STREAM);
RefPtr<DecodePromise> p = mDrainPromise.Ensure(__func__);
mJavaDecoder->Input(nullptr, bufferInfo, nullptr);
return p;
});
}
RefPtr<ShutdownPromise>
RemoteDataDecoder::Shutdown()
{
LOG("");
RefPtr<RemoteDataDecoder> self = this;
return InvokeAsync(mTaskQueue, this, __func__,
&RemoteDataDecoder::ProcessShutdown);
}
RefPtr<ShutdownPromise>
RemoteDataDecoder::ProcessShutdown()
{
AssertOnTaskQueue();
mShutdown = true;
if (mJavaDecoder) {
mJavaDecoder->Release();
mJavaDecoder = nullptr;
@ -610,25 +561,97 @@ RemoteDataDecoder::Shutdown()
}
mFormat = nullptr;
return ShutdownPromise::CreateAndResolve(true, __func__);
}
RefPtr<MediaDataDecoder::DecodePromise>
RemoteDataDecoder::Decode(MediaRawData* aSample)
{
MOZ_ASSERT(aSample != nullptr);
RefPtr<RemoteDataDecoder> self = this;
RefPtr<MediaRawData> sample = aSample;
return InvokeAsync(mTaskQueue, __func__, [self, sample, this]() {
jni::ByteBuffer::LocalRef bytes = jni::ByteBuffer::New(
const_cast<uint8_t*>(sample->Data()), sample->Size());
BufferInfo::LocalRef bufferInfo;
nsresult rv = BufferInfo::New(&bufferInfo);
if (NS_FAILED(rv)) {
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
}
bufferInfo->Set(0, sample->Size(), sample->mTime, 0);
RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
mJavaDecoder->Input(bytes, bufferInfo, GetCryptoInfoFromSample(sample));
return p;
});
}
void
RemoteDataDecoder::Output(MediaData* aSample)
{
if (!mTaskQueue->IsCurrentThreadIn()) {
mTaskQueue->Dispatch(
NewRunnableMethod<MediaData*>(this, &RemoteDataDecoder::Output, aSample));
return;
}
AssertOnTaskQueue();
if (mShutdown) {
return;
}
mDecodedData.AppendElement(aSample);
}
void
RemoteDataDecoder::InputExhausted()
{
if (!mTaskQueue->IsCurrentThreadIn()) {
mTaskQueue->Dispatch(
NewRunnableMethod(this, &RemoteDataDecoder::InputExhausted));
return;
}
AssertOnTaskQueue();
if (mShutdown) {
return;
}
mDecodePromise.ResolveIfExists(mDecodedData, __func__);
mDecodedData.Clear();
}
void
RemoteDataDecoder::DrainComplete()
{
if (!mTaskQueue->IsCurrentThreadIn()) {
mTaskQueue->Dispatch(
NewRunnableMethod(this, &RemoteDataDecoder::DrainComplete));
return;
}
AssertOnTaskQueue();
if (mShutdown) {
return;
}
mDrainPromise.ResolveIfExists(mDecodedData, __func__);
mDecodedData.Clear();
}
void
RemoteDataDecoder::Error(const MediaResult& aError)
{
if (!mTaskQueue->IsCurrentThreadIn()) {
mTaskQueue->Dispatch(
NewRunnableMethod<MediaResult>(this, &RemoteDataDecoder::Error, aError));
return;
}
AssertOnTaskQueue();
if (mShutdown) {
return;
}
mDecodePromise.RejectIfExists(aError, __func__);
mDrainPromise.RejectIfExists(aError, __func__);
mDecodedData.Clear();
}
} // mozilla
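The refactoring above follows one pattern throughout: the public entry point hops onto mTaskQueue with InvokeAsync, arms a promise holder, and the Java callbacks settle that promise later. A minimal, self-contained analogue of that dispatch-and-settle flow, using only std:: primitives (SerialQueue and the int payload are illustrative stand-ins, not Mozilla types):
// Sketch only: a serial task queue whose Invoke() returns a future,
// mirroring how Decode()/Drain()/Flush() return promises settled on
// the decoder's task queue.
#include <condition_variable>
#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>
class SerialQueue {
public:
  SerialQueue() : mWorker([this] { Run(); }) {}
  ~SerialQueue() {
    {
      std::lock_guard<std::mutex> lock(mMutex);
      mDone = true;
    }
    mCondVar.notify_one();
    mWorker.join();
  }
  // Queue a job; the returned future resolves once the job has run.
  std::future<std::vector<int>> Invoke(std::function<std::vector<int>()> aJob) {
    auto task = std::make_shared<std::packaged_task<std::vector<int>()>>(std::move(aJob));
    std::future<std::vector<int>> result = task->get_future();
    {
      std::lock_guard<std::mutex> lock(mMutex);
      mJobs.push([task] { (*task)(); });
    }
    mCondVar.notify_one();
    return result;
  }
private:
  void Run() {
    for (;;) {
      std::function<void()> job;
      {
        std::unique_lock<std::mutex> lock(mMutex);
        mCondVar.wait(lock, [this] { return mDone || !mJobs.empty(); });
        if (mDone && mJobs.empty()) {
          return;
        }
        job = std::move(mJobs.front());
        mJobs.pop();
      }
      job();  // every job runs serially on the one worker thread
    }
  }
  std::mutex mMutex;
  std::condition_variable mCondVar;
  std::queue<std::function<void()>> mJobs;
  bool mDone = false;
  std::thread mWorker;  // declared last so the queue state exists first
};
int main() {
  SerialQueue queue;
  // Analogue of: decoder->Decode(sample) returning a DecodePromise.
  auto decoded = queue.Invoke([] { return std::vector<int>{1, 2, 3}; });
  return decoded.get().size() == 3 ? 0 : 1;
}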


@ -14,33 +14,26 @@
#include "mozilla/Monitor.h"
#include "mozilla/Maybe.h"
#include <deque>
namespace mozilla {
class RemoteDataDecoder : public MediaDataDecoder
{
public:
static already_AddRefed<MediaDataDecoder> CreateAudioDecoder(
const AudioInfo& aConfig, java::sdk::MediaFormat::Param aFormat,
const nsString& aDrmStubId, CDMProxy* aProxy, TaskQueue* aTaskQueue);
static already_AddRefed<MediaDataDecoder> CreateVideoDecoder(
const VideoInfo& aConfig, java::sdk::MediaFormat::Param aFormat,
layers::ImageContainer* aImageContainer, const nsString& aDrmStubId,
CDMProxy* aProxy, TaskQueue* aTaskQueue);
virtual ~RemoteDataDecoder() { }
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
RefPtr<DecodePromise> Drain() override;
RefPtr<FlushPromise> Flush() override;
RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
return "android remote decoder";
@ -50,19 +43,34 @@ protected:
RemoteDataDecoder(MediaData::Type aType,
const nsACString& aMimeType,
java::sdk::MediaFormat::Param aFormat,
const nsString& aDrmStubId, TaskQueue* aTaskQueue);
// Methods only called on mTaskQueue.
RefPtr<ShutdownPromise> ProcessShutdown();
void Output(MediaData* aSample);
void InputExhausted();
void DrainComplete();
void Error(const MediaResult& aError);
void AssertOnTaskQueue()
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
}
MediaData::Type mType;
nsAutoCString mMimeType;
java::sdk::MediaFormat::GlobalRef mFormat;
java::CodecProxy::GlobalRef mJavaDecoder;
java::CodecProxy::NativeCallbacks::GlobalRef mJavaCallbacks;
nsString mDrmStubId;
RefPtr<TaskQueue> mTaskQueue;
// Only ever accessed on mTaskQueue.
bool mShutdown = false;
MozPromiseHolder<DecodePromise> mDecodePromise;
MozPromiseHolder<DecodePromise> mDrainPromise;
DecodedData mDecodedData;
};
} // namespace mozilla
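mDecodePromise and mDrainPromise are MozPromiseHolders: Decode() and Drain() arm them with Ensure(), and the callback notifications later settle them with ResolveIfExists()/RejectIfExists(), which tolerate the no-pending-promise case. A rough single-threaded sketch of those holder semantics (an illustrative template, not the MozPromise API):
// Sketch only: a holder that may be armed, and whose "IfExists" settles
// are safe no-ops when nothing is pending.
#include <functional>
#include <utility>
template <typename T, typename E>
class PromiseHolder {
public:
  // Arm the holder; analogous to mDecodePromise.Ensure(__func__).
  void Ensure(std::function<void(T)> aResolve, std::function<void(E)> aReject) {
    mResolve = std::move(aResolve);
    mReject = std::move(aReject);
    mArmed = true;
  }
  // Safe to call even when no promise is pending, which is why a late
  // InputExhausted() or DrainComplete() cannot crash the decoder.
  void ResolveIfExists(T aValue) {
    if (!mArmed) {
      return;
    }
    mArmed = false;
    mResolve(std::move(aValue));
  }
  void RejectIfExists(E aError) {
    if (!mArmed) {
      return;
    }
    mArmed = false;
    mReject(std::move(aError));
  }
private:
  bool mArmed = false;
  std::function<void(T)> mResolve;
  std::function<void(E)> mReject;
};
int main() {
  PromiseHolder<int, const char*> holder;
  holder.Ensure([](int) { /* deliver decoded samples */ },
                [](const char*) { /* propagate the error */ });
  holder.ResolveIfExists(42);
  holder.ResolveIfExists(43);  // tolerated no-op: nothing pending anymore
}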


@ -19,15 +19,12 @@
namespace mozilla {
AppleATDecoder::AppleATDecoder(const AudioInfo& aConfig,
TaskQueue* aTaskQueue)
: mConfig(aConfig)
, mFileStreamError(false)
, mTaskQueue(aTaskQueue)
, mConverter(nullptr)
, mStream(nullptr)
, mParsedFramesForAACMagicCookie(0)
, mErrored(false)
{
@ -65,31 +62,26 @@ AppleATDecoder::Init()
return InitPromise::CreateAndResolve(TrackType::kAudioTrack, __func__);
}
RefPtr<MediaDataDecoder::DecodePromise>
AppleATDecoder::Decode(MediaRawData* aSample)
{
LOG("mp4 input sample %p %lld us %lld pts%s %llu bytes audio", aSample,
aSample->mDuration, aSample->mTime, aSample->mKeyframe ? " keyframe" : "",
(unsigned long long)aSample->Size());
// Queue a task to perform the actual decoding on a separate thread.
RefPtr<AppleATDecoder> self = this;
RefPtr<MediaRawData> sample = aSample;
return InvokeAsync(mTaskQueue, __func__, [self, this, sample] {
return ProcessDecode(sample);
});
}
RefPtr<MediaDataDecoder::FlushPromise>
AppleATDecoder::ProcessFlush()
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
mQueuedSamples.Clear();
mDecodedSamples.Clear();
if (mConverter) {
OSStatus rv = AudioConverterReset(mConverter);
if (rv) {
@ -102,37 +94,34 @@ AppleATDecoder::ProcessFlush()
ProcessShutdown();
mErrored = false;
}
return FlushPromise::CreateAndResolve(true, __func__);
}
RefPtr<MediaDataDecoder::FlushPromise>
AppleATDecoder::Flush()
{
LOG("Flushing AudioToolbox AAC decoder");
return InvokeAsync(mTaskQueue, this, __func__, &AppleATDecoder::ProcessFlush);
}
RefPtr<MediaDataDecoder::DecodePromise>
AppleATDecoder::Drain()
{
LOG("Draining AudioToolbox AAC decoder");
RefPtr<AppleATDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [] {
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
});
}
RefPtr<ShutdownPromise>
AppleATDecoder::Shutdown()
{
RefPtr<AppleATDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
ProcessShutdown();
return ShutdownPromise::CreateAndResolve(true, __func__);
});
}
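Note the `RefPtr<AppleATDecoder> self = this` captured by the lambda: it keeps the decoder alive until the queued shutdown task has actually run. The same keep-alive idiom with standard smart pointers, as a self-contained sketch (Decoder and MakeShutdownTask are illustrative names):
// Sketch only: the keep-alive capture idiom with standard smart pointers.
#include <functional>
#include <memory>
#include <vector>
struct Decoder : std::enable_shared_from_this<Decoder> {
  void ProcessShutdown() { /* release platform resources here */ }
  // Queue shutdown work; the captured shared_ptr guarantees the object
  // outlives the task even if the last external reference is dropped.
  std::function<void()> MakeShutdownTask() {
    std::shared_ptr<Decoder> self = shared_from_this();
    return [self] { self->ProcessShutdown(); };
  }
};
int main() {
  std::vector<std::function<void()>> queue;
  {
    auto decoder = std::make_shared<Decoder>();
    queue.push_back(decoder->MakeShutdownTask());
  } // external reference gone; the task still holds the object
  for (auto& task : queue) task();
}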
void
@ -200,21 +189,16 @@ _PassthroughInputDataCallback(AudioConverterRef aAudioConverter,
return noErr;
}
RefPtr<MediaDataDecoder::DecodePromise>
AppleATDecoder::ProcessDecode(MediaRawData* aSample)
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MediaResult rv = NS_OK;
if (!mConverter) {
rv = SetupDecoder(aSample);
if (rv != NS_OK && rv != NS_ERROR_NOT_INITIALIZED) {
return DecodePromise::CreateAndReject(rv, __func__);
}
}
@ -225,13 +209,13 @@ AppleATDecoder::SubmitSample(MediaRawData* aSample)
rv = DecodeSample(mQueuedSamples[i]);
if (NS_FAILED(rv)) {
mErrored = true;
return DecodePromise::CreateAndReject(rv, __func__);
}
}
mQueuedSamples.Clear();
}
return DecodePromise::CreateAndResolve(Move(mDecodedSamples), __func__);
}
MediaResult
@ -343,7 +327,7 @@ AppleATDecoder::DecodeSample(MediaRawData* aSample)
data.Forget(),
channels,
rate);
mDecodedSamples.AppendElement(Move(audio));
return NS_OK;
}
@ -496,8 +480,8 @@ AppleATDecoder::SetupChannelLayout()
if (tag != kAudioChannelLayoutTag_UseChannelDescriptions) {
AudioFormatPropertyID property =
tag == kAudioChannelLayoutTag_UseChannelBitmap
? kAudioFormatProperty_ChannelLayoutForBitmap
: kAudioFormatProperty_ChannelLayoutForTag;
if (property == kAudioFormatProperty_ChannelLayoutForBitmap) {
status =
@ -632,6 +616,8 @@ _MetadataCallback(void* aAppleATDecoder,
UInt32* aFlags)
{
AppleATDecoder* decoder = static_cast<AppleATDecoder*>(aAppleATDecoder);
MOZ_RELEASE_ASSERT(decoder->mTaskQueue->IsCurrentThreadIn());
LOG("MetadataCallback receiving: '%s'", FourCC2Str(aProperty));
if (aProperty == kAudioFileStreamProperty_MagicCookieData) {
UInt32 size;


@ -9,7 +9,6 @@
#include <AudioToolbox/AudioToolbox.h>
#include "PlatformDecoderModule.h"
#include "mozilla/ReentrantMonitor.h"
#include "mozilla/Vector.h"
#include "nsIThread.h"
#include "AudioConverter.h"
@ -17,20 +16,18 @@
namespace mozilla {
class TaskQueue;
class AppleATDecoder : public MediaDataDecoder {
public:
AppleATDecoder(const AudioInfo& aConfig,
TaskQueue* aTaskQueue,
TaskQueue* aTaskQueue);
~AppleATDecoder();
RefPtr<InitPromise> Init() override;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
RefPtr<DecodePromise> Drain() override;
RefPtr<FlushPromise> Flush() override;
RefPtr<ShutdownPromise> Shutdown() override;
const char* GetDescriptionName() const override
{
@ -46,9 +43,9 @@ public:
// the magic cookie property.
bool mFileStreamError;
private:
const RefPtr<TaskQueue> mTaskQueue;
AudioConverterRef mConverter;
AudioStreamBasicDescription mOutputFormat;
UInt32 mFormatID;
@ -56,11 +53,11 @@ private:
nsTArray<RefPtr<MediaRawData>> mQueuedSamples;
UniquePtr<AudioConfig::ChannelLayout> mChannelLayout;
UniquePtr<AudioConverter> mAudioConverter;
DecodedData mDecodedSamples;
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
RefPtr<FlushPromise> ProcessFlush();
void ProcessShutdown();
MediaResult DecodeSample(MediaRawData* aSample);
MediaResult GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
const nsTArray<uint8_t>& aExtraData);


@ -74,7 +74,6 @@ AppleDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
RefPtr<MediaDataDecoder> decoder =
new AppleVTDecoder(aParams.VideoConfig(),
aParams.mTaskQueue,
aParams.mImageContainer);
return decoder.forget();
}
@ -83,9 +82,7 @@ already_AddRefed<MediaDataDecoder>
AppleDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
{
RefPtr<MediaDataDecoder> decoder =
new AppleATDecoder(aParams.AudioConfig(), aParams.mTaskQueue);
return decoder.forget();
}


@ -26,10 +26,8 @@ namespace mozilla {
AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig,
TaskQueue* aTaskQueue,
layers::ImageContainer* aImageContainer)
: mExtraData(aConfig.mExtraData)
, mPictureWidth(aConfig.mImage.width)
, mPictureHeight(aConfig.mImage.height)
, mDisplayWidth(aConfig.mDisplay.width)
@ -37,24 +35,24 @@ AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig,
, mTaskQueue(aTaskQueue)
, mMaxRefFrames(mp4_demuxer::H264::ComputeMaxRefFrames(aConfig.mExtraData))
, mImageContainer(aImageContainer)
#ifdef MOZ_WIDGET_UIKIT
, mUseSoftwareImages(true)
#else
, mUseSoftwareImages(false)
#endif
, mIsFlushing(false)
, mMonitor("AppleVideoDecoder")
, mMonitor("AppleVTDecoder")
, mFormat(nullptr)
, mSession(nullptr)
, mIsHardwareAccelerated(false)
{
MOZ_COUNT_CTOR(AppleVTDecoder);
// TODO: Verify aConfig.mime_type.
LOG("Creating AppleVTDecoder for %dx%d h.264 video",
mDisplayWidth,
mDisplayHeight
);
LOG("Creating AppleVTDecoder for %dx%d h.264 video", mDisplayWidth,
mDisplayHeight);
// To ensure our PromiseHolder is only ever accessed with the monitor held.
mPromise.SetMonitor(&mMonitor);
}
AppleVTDecoder::~AppleVTDecoder()
@ -74,11 +72,9 @@ AppleVTDecoder::Init()
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}
RefPtr<MediaDataDecoder::DecodePromise>
AppleVTDecoder::Decode(MediaRawData* aSample)
{
LOG("mp4 input sample %p pts %lld duration %lld us%s %d bytes",
aSample,
aSample->mTime,
@ -86,58 +82,130 @@ AppleVTDecoder::Input(MediaRawData* aSample)
aSample->mKeyframe ? " keyframe" : "",
aSample->Size());
RefPtr<AppleVTDecoder> self = this;
RefPtr<MediaRawData> sample = aSample;
return InvokeAsync(mTaskQueue, __func__, [self, this, sample] {
RefPtr<DecodePromise> p;
{
MonitorAutoLock mon(mMonitor);
p = mPromise.Ensure(__func__);
}
ProcessDecode(sample);
return p;
});
}
RefPtr<MediaDataDecoder::FlushPromise>
AppleVTDecoder::Flush()
{
mIsFlushing = true;
return InvokeAsync(mTaskQueue, this, __func__, &AppleVTDecoder::ProcessFlush);
}
RefPtr<MediaDataDecoder::DecodePromise>
AppleVTDecoder::Drain()
{
return InvokeAsync(mTaskQueue, this, __func__, &AppleVTDecoder::ProcessDrain);
}
RefPtr<ShutdownPromise>
AppleVTDecoder::Shutdown()
{
if (mTaskQueue) {
RefPtr<AppleVTDecoder> self = this;
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
ProcessShutdown();
return ShutdownPromise::CreateAndResolve(true, __func__);
});
}
ProcessShutdown();
return ShutdownPromise::CreateAndResolve(true, __func__);
}
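Shutdown() above branches: with a task queue it dispatches the cleanup and resolves the promise from the queue; without one it cleans up inline. A compact sketch of that queue-or-inline shape (Executor and Shutdown are illustrative names, std::future standing in for ShutdownPromise):
// Sketch only: run cleanup on an executor when present, else inline.
#include <functional>
#include <future>
#include <iostream>
#include <memory>
using Executor = std::function<void(std::function<void()>)>;
// Returns a future resolved once cleanup() has actually run.
std::future<bool> Shutdown(Executor* maybeQueue, std::function<void()> cleanup) {
  auto done = std::make_shared<std::promise<bool>>();
  std::future<bool> f = done->get_future();
  auto task = [done, cleanup] { cleanup(); done->set_value(true); };
  if (maybeQueue) {
    (*maybeQueue)(task);  // analogue of InvokeAsync(mTaskQueue, ...)
  } else {
    task();               // no queue: clean up on the calling thread
  }
  return f;
}
int main() {
  auto inlineResult = Shutdown(nullptr, [] { std::cout << "cleaned up\n"; });
  std::cout << "resolved: " << inlineResult.get() << '\n';
}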
// Helper to fill in a timestamp structure.
static CMSampleTimingInfo
TimingInfoFromSample(MediaRawData* aSample)
{
CMSampleTimingInfo timestamp;
timestamp.duration = CMTimeMake(aSample->mDuration, USECS_PER_S);
timestamp.presentationTimeStamp =
CMTimeMake(aSample->mTime, USECS_PER_S);
timestamp.decodeTimeStamp =
CMTimeMake(aSample->mTimecode, USECS_PER_S);
return timestamp;
}
void
AppleVTDecoder::ProcessDecode(MediaRawData* aSample)
{
AssertOnTaskQueueThread();
if (mIsFlushing) {
MonitorAutoLock mon(mMonitor);
mPromise.Reject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
return;
}
AutoCFRelease<CMBlockBufferRef> block = nullptr;
AutoCFRelease<CMSampleBufferRef> sample = nullptr;
VTDecodeInfoFlags infoFlags;
OSStatus rv;
// FIXME: This copies the sample data. I think we can provide
// a custom block source which reuses the aSample buffer.
// But note that there may be a problem keeping the samples
// alive over multiple frames.
rv = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, // Struct allocator.
const_cast<uint8_t*>(aSample->Data()),
aSample->Size(),
kCFAllocatorNull, // Block allocator.
NULL, // Block source.
0, // Data offset.
aSample->Size(),
false,
block.receive());
if (rv != noErr) {
NS_ERROR("Couldn't create CMBlockBuffer");
MonitorAutoLock mon(mMonitor);
mPromise.Reject(
MediaResult(NS_ERROR_OUT_OF_MEMORY,
RESULT_DETAIL("CMBlockBufferCreateWithMemoryBlock:%x", rv)),
__func__);
return;
}
CMSampleTimingInfo timestamp = TimingInfoFromSample(aSample);
rv = CMSampleBufferCreate(kCFAllocatorDefault, block, true, 0, 0, mFormat, 1, 1, &timestamp, 0, NULL, sample.receive());
if (rv != noErr) {
NS_ERROR("Couldn't create CMSampleBuffer");
MonitorAutoLock mon(mMonitor);
mPromise.Reject(MediaResult(NS_ERROR_OUT_OF_MEMORY,
RESULT_DETAIL("CMSampleBufferCreate:%x", rv)),
__func__);
return;
}
VTDecodeFrameFlags decodeFlags =
kVTDecodeFrame_EnableAsynchronousDecompression;
rv = VTDecompressionSessionDecodeFrame(mSession,
sample,
decodeFlags,
CreateAppleFrameRef(aSample),
&infoFlags);
if (rv != noErr && !(infoFlags & kVTDecodeInfo_FrameDropped)) {
LOG("AppleVTDecoder: Error %d VTDecompressionSessionDecodeFrame", rv);
NS_WARNING("Couldn't pass frame to decoder");
// It appears that even when VTDecompressionSessionDecodeFrame returns a
// failure, decoding sometimes still gets processed.
MonitorAutoLock mon(mMonitor);
mPromise.RejectIfExists(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("VTDecompressionSessionDecodeFrame:%x", rv)),
__func__);
return;
}
}
void
@ -156,29 +224,39 @@ AppleVTDecoder::ProcessShutdown()
}
}
RefPtr<MediaDataDecoder::FlushPromise>
AppleVTDecoder::ProcessFlush()
{
AssertOnTaskQueueThread();
nsresult rv = WaitForAsynchronousFrames();
if (NS_FAILED(rv)) {
LOG("AppleVTDecoder::Flush failed waiting for platform decoder "
"with error:%d.", rv);
LOG("AppleVTDecoder::Flush failed waiting for platform decoder");
}
MonitorAutoLock mon(mMonitor);
mPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
while (!mReorderQueue.IsEmpty()) {
mReorderQueue.Pop();
}
mSeekTargetThreshold.reset();
mIsFlushing = false;
return FlushPromise::CreateAndResolve(true, __func__);
}
RefPtr<MediaDataDecoder::DecodePromise>
AppleVTDecoder::ProcessDrain()
{
AssertOnTaskQueueThread();
nsresult rv = WaitForAsynchronousFrames();
if (NS_FAILED(rv)) {
LOG("AppleVTDecoder::Drain failed waiting for platform decoder "
"with error:%d.", rv);
LOG("AppleVTDecoder::Drain failed waiting for platform decoder");
}
MonitorAutoLock mon(mMonitor);
DecodedData samples;
while (!mReorderQueue.IsEmpty()) {
samples.AppendElement(Move(mReorderQueue.Pop()));
}
return DecodePromise::CreateAndResolve(Move(samples), __func__);
}
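ProcessDrain() empties mReorderQueue, which exists because VideoToolbox emits frames in decode (DTS) order while callers need presentation (PTS) order; up to mMaxRefFrames frames are held back. A self-contained illustration of that reordering with a min-heap keyed on pts (all names illustrative):
// Sketch only: reordering decode-order output into presentation order.
#include <cstdint>
#include <iostream>
#include <queue>
#include <vector>
struct Frame { int64_t pts; };
struct ByPts {
  bool operator()(const Frame& a, const Frame& b) const { return a.pts > b.pts; }
};
int main() {
  // Frames emitted by the decoder in DTS order (B-frames arrive late).
  std::vector<Frame> decodeOrder = {{0}, {3000}, {1000}, {2000}};
  const size_t maxRefFrames = 2;  // analogue of mMaxRefFrames
  std::priority_queue<Frame, std::vector<Frame>, ByPts> reorder;
  for (const Frame& f : decodeOrder) {
    reorder.push(f);
    // Only release a frame once enough later frames have been seen.
    while (reorder.size() > maxRefFrames) {
      std::cout << "output pts=" << reorder.top().pts << '\n';
      reorder.pop();
    }
  }
  // Drain: everything left comes out in presentation order.
  while (!reorder.empty()) {
    std::cout << "drain pts=" << reorder.top().pts << '\n';
    reorder.pop();
  }
}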
AppleVTDecoder::AppleFrameRef*
@ -188,24 +266,6 @@ AppleVTDecoder::CreateAppleFrameRef(const MediaRawData* aSample)
return new AppleFrameRef(*aSample);
}
void
AppleVTDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
{
@ -247,17 +307,18 @@ PlatformCallback(void* decompressionOutputRefCon,
MOZ_ASSERT(CFGetTypeID(image) == CVPixelBufferGetTypeID(),
"VideoToolbox returned an unexpected image type");
}
decoder->OutputFrame(image, *frameRef);
}
// Copy and return a decoded frame.
void
AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
AppleVTDecoder::AppleFrameRef aFrameRef)
{
if (mIsFlushing) {
// We are in the process of flushing; ignore frame.
return;
}
LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
@ -271,8 +332,9 @@ AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
if (!aImage) {
// Image was dropped by decoder or none returned yet.
// We need more input to continue.
MonitorAutoLock mon(mMonitor);
mPromise.Resolve(DecodedData(), __func__);
return;
}
bool useNullSample = false;
@ -310,10 +372,12 @@ AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
if (rv != kCVReturnSuccess) {
NS_ERROR("error locking pixel data");
MonitorAutoLock mon(mMonitor);
mPromise.Reject(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("CVPixelBufferLockBaseAddress:%x", rv)),
__func__);
return;
}
// Y plane.
buffer.mPlanes[0].mData =
@ -378,22 +442,23 @@ AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
if (!data) {
NS_ERROR("Couldn't create VideoData for frame");
MonitorAutoLock mon(mMonitor);
mPromise.Reject(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
return;
}
// Frames come out in DTS order but we need to output them
// in composition order.
MonitorAutoLock mon(mMonitor);
mReorderQueue.Push(data);
DecodedData results;
while (mReorderQueue.Length() > mMaxRefFrames) {
results.AppendElement(mReorderQueue.Pop());
}
mPromise.Resolve(Move(results), __func__);
LOG("%llu decoded frames queued",
static_cast<unsigned long long>(mReorderQueue.Length()));
}
nsresult
@ -401,86 +466,12 @@ AppleVTDecoder::WaitForAsynchronousFrames()
{
OSStatus rv = VTDecompressionSessionWaitForAsynchronousFrames(mSession);
if (rv != noErr) {
LOG("AppleVTDecoder: Error %d waiting for asynchronous frames", rv);
NS_ERROR("AppleVTDecoder: Error waiting for asynchronous frames");
return NS_ERROR_FAILURE;
}
return NS_OK;
}
nsresult
AppleVTDecoder::InitializeSession()
{
@ -670,5 +661,4 @@ AppleVTDecoder::CreateOutputConfiguration()
#endif
}
} // namespace mozilla


@ -21,7 +21,6 @@ class AppleVTDecoder : public MediaDataDecoder {
public:
AppleVTDecoder(const VideoInfo& aConfig,
TaskQueue* aTaskQueue,
layers::ImageContainer* aImageContainer);
class AppleFrameRef {
@ -43,10 +42,10 @@ public:
};
RefPtr<InitPromise> Init() override;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
RefPtr<DecodePromise> Drain() override;
RefPtr<FlushPromise> Flush() override;
RefPtr<ShutdownPromise> Shutdown() override;
void SetSeekThreshold(const media::TimeUnit& aTime) override;
bool IsHardwareAccelerated(nsACString& aFailureReason) const override
@ -57,21 +56,20 @@ public:
const char* GetDescriptionName() const override
{
return mIsHardwareAccelerated
? "apple hardware VT decoder"
: "apple software VT decoder";
? "apple hardware VT decoder"
: "apple software VT decoder";
}
// Access from the taskqueue and the decoder's thread.
// OutputFrame is thread-safe.
void OutputFrame(CVPixelBufferRef aImage, AppleFrameRef aFrameRef);
private:
virtual ~AppleVTDecoder();
RefPtr<FlushPromise> ProcessFlush();
RefPtr<DecodePromise> ProcessDrain();
void ProcessShutdown();
void ProcessDecode(MediaRawData* aSample);
void AssertOnTaskQueueThread()
{
@ -79,12 +77,9 @@ private:
}
AppleFrameRef* CreateAppleFrameRef(const MediaRawData* aSample);
CFDictionaryRef CreateOutputConfiguration();
const RefPtr<MediaByteBuffer> mExtraData;
const uint32_t mPictureWidth;
const uint32_t mPictureHeight;
const uint32_t mDisplayWidth;
@ -95,22 +90,21 @@ private:
nsresult WaitForAsynchronousFrames();
CFDictionaryRef CreateDecoderSpecification();
CFDictionaryRef CreateDecoderExtensions();
const RefPtr<TaskQueue> mTaskQueue;
const uint32_t mMaxRefFrames;
const RefPtr<layers::ImageContainer> mImageContainer;
const bool mUseSoftwareImages;
// Set on reader/decode thread calling Flush() to indicate that output is
// not required and so input samples on mTaskQueue need not be processed.
// Cleared on mTaskQueue in ProcessFlush().
Atomic<bool> mIsFlushing;
// Protects mReorderQueue and mPromise.
Monitor mMonitor;
ReorderQueue mReorderQueue;
MozPromiseHolder<DecodePromise> mPromise;
// Decoded frame will be dropped if its pts is smaller than this
// value. It should be initialized before Decode() or after Flush(). So it is
// safe to access it in OutputFrame without protecting.


@ -15,9 +15,8 @@ namespace mozilla
{
FFmpegAudioDecoder<LIBAV_VER>::FFmpegAudioDecoder(FFmpegLibWrapper* aLib,
TaskQueue* aTaskQueue, const AudioInfo& aConfig)
: FFmpegDataDecoder(aLib, aTaskQueue, GetCodecId(aConfig.mMimeType))
{
MOZ_COUNT_CTOR(FFmpegAudioDecoder);
// Use a new MediaByteBuffer as the object will be modified during initialization.
@ -117,8 +116,8 @@ CopyAndPackAudio(AVFrame* aFrame, uint32_t aNumChannels, uint32_t aNumAFrames)
return audio;
}
RefPtr<MediaDataDecoder::DecodePromise>
FFmpegAudioDecoder<LIBAV_VER>::ProcessDecode(MediaRawData* aSample)
{
AVPacket packet;
mLib->av_init_packet(&packet);
@ -127,14 +126,17 @@ FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
packet.size = aSample->Size();
if (!PrepareFrame()) {
return DecodePromise::CreateAndReject(
MediaResult(
NS_ERROR_OUT_OF_MEMORY,
RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame")),
__func__);
}
int64_t samplePosition = aSample->mOffset;
media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);
DecodedData results;
while (packet.size > 0) {
int decoded;
int bytesConsumed =
@ -142,8 +144,10 @@ FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
if (bytesConsumed < 0) {
NS_WARNING("FFmpeg audio decoder error.");
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("FFmpeg audio error:%d", bytesConsumed)),
__func__);
}
if (mFrame->format != AV_SAMPLE_FMT_FLT &&
@ -152,18 +156,21 @@ FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
mFrame->format != AV_SAMPLE_FMT_S16P &&
mFrame->format != AV_SAMPLE_FMT_S32 &&
mFrame->format != AV_SAMPLE_FMT_S32P) {
return DecodePromise::CreateAndReject(
MediaResult(
NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("FFmpeg audio decoder outputs unsupported audio format")),
__func__);
}
if (decoded) {
uint32_t numChannels = mCodecContext->channels;
AudioConfig::ChannelLayout layout(numChannels);
if (!layout.IsValid()) {
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("Unsupported channel layout:%u", numChannels)),
__func__);
}
uint32_t samplingRate = mCodecContext->sample_rate;
@ -171,44 +178,46 @@ FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
AlignedAudioBuffer audio =
CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
if (!audio) {
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
}
media::TimeUnit duration =
FramesToTimeUnit(mFrame->nb_samples, samplingRate);
if (!duration.IsValid()) {
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Invalid sample duration")),
__func__);
}
media::TimeUnit newpts = pts + duration;
if (!newpts.IsValid()) {
return DecodePromise::CreateAndReject(
MediaResult(
NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Invalid count of accumulated audio samples")),
__func__);
}
results.AppendElement(new AudioData(
samplePosition, pts.ToMicroseconds(), duration.ToMicroseconds(),
mFrame->nb_samples, Move(audio), numChannels, samplingRate));
pts = newpts;
}
packet.data += bytesConsumed;
packet.size -= bytesConsumed;
samplePosition += bytesConsumed;
}
return DecodePromise::CreateAndResolve(Move(results), __func__);
}
RefPtr<MediaDataDecoder::DecodePromise>
FFmpegAudioDecoder<LIBAV_VER>::ProcessDrain()
{
ProcessFlush();
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
}
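The decode loop above stamps each output sample with the current pts, then advances it by the sample's duration, rejecting the decode if the addition would overflow before committing the new value. The same checked-accumulation arithmetic as a stand-alone sketch (CheckedAddUs is an illustrative helper; Mozilla's media::TimeUnit does this validity checking internally):
// Sketch only: checked accumulation of presentation timestamps.
#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>
// Add two microsecond timestamps, failing instead of wrapping around.
std::optional<int64_t> CheckedAddUs(int64_t pts, int64_t duration) {
  if (duration > 0 && pts > std::numeric_limits<int64_t>::max() - duration) {
    return std::nullopt;  // would overflow: reject the decode
  }
  return pts + duration;
}
int main() {
  int64_t pts = 0;
  for (int64_t duration : {21333, 21333, 21334}) {  // ~1024 frames @ 48 kHz
    auto newPts = CheckedAddUs(pts, duration);
    if (!newPts) { std::cerr << "overflow\n"; return 1; }
    // Emit the sample stamped with the *old* pts, then advance,
    // mirroring "results.AppendElement(...); pts = newpts;".
    std::cout << "sample pts=" << pts << " us\n";
    pts = *newPts;
  }
}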
AVCodecID


@ -22,7 +22,6 @@ class FFmpegAudioDecoder<LIBAV_VER> : public FFmpegDataDecoder<LIBAV_VER>
{
public:
FFmpegAudioDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
const AudioInfo& aConfig);
virtual ~FFmpegAudioDecoder();
@ -35,8 +34,8 @@ public:
}
private:
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample) override;
RefPtr<DecodePromise> ProcessDrain() override;
};
} // namespace mozilla


@ -21,18 +21,15 @@ namespace mozilla
StaticMutex FFmpegDataDecoder<LIBAV_VER>::sMonitor;
FFmpegDataDecoder<LIBAV_VER>::FFmpegDataDecoder(FFmpegLibWrapper* aLib,
TaskQueue* aTaskQueue,
AVCodecID aCodecID)
: mLib(aLib)
, mCodecContext(nullptr)
, mFrame(NULL)
, mExtraData(nullptr)
, mCodecID(aCodecID)
, mTaskQueue(aTaskQueue)
{
MOZ_ASSERT(aLib);
MOZ_COUNT_CTOR(FFmpegDataDecoder);
@ -90,67 +87,49 @@ FFmpegDataDecoder<LIBAV_VER>::InitDecoder()
return NS_OK;
}
RefPtr<ShutdownPromise>
FFmpegDataDecoder<LIBAV_VER>::Shutdown()
{
if (mTaskQueue) {
RefPtr<FFmpegDataDecoder<LIBAV_VER>> self = this;
return InvokeAsync(mTaskQueue, __func__, [self, this]() {
ProcessShutdown();
return ShutdownPromise::CreateAndResolve(true, __func__);
});
}
ProcessShutdown();
return ShutdownPromise::CreateAndResolve(true, __func__);
}
RefPtr<MediaDataDecoder::DecodePromise>
FFmpegDataDecoder<LIBAV_VER>::Decode(MediaRawData* aSample)
{
return InvokeAsync<MediaRawData*>(mTaskQueue, this, __func__,
&FFmpegDataDecoder::ProcessDecode, aSample);
}
RefPtr<MediaDataDecoder::FlushPromise>
FFmpegDataDecoder<LIBAV_VER>::Flush()
{
return InvokeAsync(mTaskQueue, this, __func__,
&FFmpegDataDecoder<LIBAV_VER>::ProcessFlush);
}
RefPtr<MediaDataDecoder::DecodePromise>
FFmpegDataDecoder<LIBAV_VER>::Drain()
{
return InvokeAsync(mTaskQueue, this, __func__,
&FFmpegDataDecoder<LIBAV_VER>::ProcessDrain);
}
RefPtr<MediaDataDecoder::FlushPromise>
FFmpegDataDecoder<LIBAV_VER>::ProcessFlush()
{
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
if (mCodecContext) {
mLib->avcodec_flush_buffers(mCodecContext);
}
return FlushPromise::CreateAndResolve(true, __func__);
}
void
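ProcessFlush() above reduces to a single libavcodec call when a codec context exists. A minimal usage sketch of that real FFmpeg API (FlushContext is an illustrative wrapper, not part of this patch):
// Sketch only: what the flush boils down to at the FFmpeg level.
extern "C" {
#include <libavcodec/avcodec.h>
}
void FlushContext(AVCodecContext* ctx) {
  if (ctx) {
    // Drops any buffered reference frames / delayed output so the next
    // packet decodes from a clean state (e.g. after a seek).
    avcodec_flush_buffers(ctx);
  }
}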


@ -25,30 +25,28 @@ class FFmpegDataDecoder<LIBAV_VER> : public MediaDataDecoder
{
public:
FFmpegDataDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
AVCodecID aCodecID);
virtual ~FFmpegDataDecoder();
static bool Link();
RefPtr<InitPromise> Init() override = 0;
RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
RefPtr<DecodePromise> Drain() override;
RefPtr<FlushPromise> Flush() override;
RefPtr<ShutdownPromise> Shutdown() override;
static AVCodec* FindAVCodec(FFmpegLibWrapper* aLib, AVCodecID aCodec);
protected:
// Flush and Drain operation, always run
virtual RefPtr<FlushPromise> ProcessFlush();
virtual void ProcessShutdown();
virtual void InitCodecContext() {}
AVFrame* PrepareFrame();
nsresult InitDecoder();
FFmpegLibWrapper* mLib;
AVCodecContext* mCodecContext;
AVFrame* mFrame;
@ -56,15 +54,12 @@ protected:
AVCodecID mCodecID;
private:
virtual RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample) = 0;
virtual RefPtr<DecodePromise> ProcessDrain() = 0;
static StaticMutex sMonitor;
const RefPtr<TaskQueue> mTaskQueue;
MozPromiseHolder<DecodePromise> mPromise;
};
} // namespace mozilla


@ -43,7 +43,6 @@ public:
RefPtr<MediaDataDecoder> decoder =
new FFmpegVideoDecoder<V>(mLib,
aParams.mTaskQueue,
aParams.VideoConfig(),
aParams.mImageContainer);
return decoder.forget();
@ -55,7 +54,6 @@ public:
RefPtr<MediaDataDecoder> decoder =
new FFmpegAudioDecoder<V>(mLib,
aParams.mTaskQueue,
aParams.AudioConfig());
return decoder.forget();
}


@ -102,10 +102,8 @@ FFmpegVideoDecoder<LIBAV_VER>::PtsCorrectionContext::Reset()
}
FFmpegVideoDecoder<LIBAV_VER>::FFmpegVideoDecoder(FFmpegLibWrapper* aLib,
TaskQueue* aTaskQueue, const VideoInfo& aConfig, ImageContainer* aImageContainer)
: FFmpegDataDecoder(aLib, aTaskQueue, GetCodecId(aConfig.mMimeType))
, mImageContainer(aImageContainer)
, mInfo(aConfig)
, mCodecParser(nullptr)
@ -161,15 +159,21 @@ FFmpegVideoDecoder<LIBAV_VER>::InitCodecContext()
}
}
RefPtr<MediaDataDecoder::DecodePromise>
FFmpegVideoDecoder<LIBAV_VER>::ProcessDecode(MediaRawData* aSample)
{
bool gotFrame = false;
DecodedData results;
MediaResult rv = DoDecode(aSample, &gotFrame, results);
if (NS_FAILED(rv)) {
return DecodePromise::CreateAndReject(rv, __func__);
}
return DecodePromise::CreateAndResolve(Move(results), __func__);
}
MediaResult
FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame)
FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame,
MediaDataDecoder::DecodedData& aResults)
{
uint8_t* inputData = const_cast<uint8_t*>(aSample->Data());
size_t inputSize = aSample->Size();
@ -194,7 +198,7 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame)
inputSize -= len;
if (size) {
bool gotFrame = false;
MediaResult rv = DoDecode(aSample, data, size, &gotFrame, aResults);
if (NS_FAILED(rv)) {
return rv;
}
@ -206,13 +210,14 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame)
return NS_OK;
}
#endif
return DoDecode(aSample, inputData, inputSize, aGotFrame, aResults);
}
MediaResult
FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
uint8_t* aData, int aSize,
bool* aGotFrame,
MediaDataDecoder::DecodedData& aResults)
{
AVPacket packet;
mLib->av_init_packet(&packet);
@ -337,29 +342,31 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
return MediaResult(NS_ERROR_OUT_OF_MEMORY,
RESULT_DETAIL("image allocation error"));
}
aResults.AppendElement(Move(v));
if (aGotFrame) {
*aGotFrame = true;
}
return NS_OK;
}
RefPtr<MediaDataDecoder::DecodePromise>
FFmpegVideoDecoder<LIBAV_VER>::ProcessDrain()
{
RefPtr<MediaRawData> empty(new MediaRawData());
empty->mTimecode = mLastInputDts;
bool gotFrame = false;
DecodedData results;
while (NS_SUCCEEDED(DoDecode(empty, &gotFrame, results)) && gotFrame) {
}
return DecodePromise::CreateAndResolve(Move(results), __func__);
}
RefPtr<MediaDataDecoder::FlushPromise>
FFmpegVideoDecoder<LIBAV_VER>::ProcessFlush()
{
mPtsContext.Reset();
mDurationMap.Clear();
return FFmpegDataDecoder::ProcessFlush();
}
FFmpegVideoDecoder<LIBAV_VER>::~FFmpegVideoDecoder()


@ -28,7 +28,6 @@ class FFmpegVideoDecoder<LIBAV_VER> : public FFmpegDataDecoder<LIBAV_VER>
public:
FFmpegVideoDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
const VideoInfo& aConfig,
ImageContainer* aImageContainer);
virtual ~FFmpegVideoDecoder();
@ -46,11 +45,11 @@ public:
static AVCodecID GetCodecId(const nsACString& aMimeType);
private:
RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample) override;
RefPtr<DecodePromise> ProcessDrain() override;
RefPtr<FlushPromise> ProcessFlush() override;
MediaResult DoDecode(MediaRawData* aSample, bool* aGotFrame, DecodedData& aResults);
MediaResult DoDecode(MediaRawData* aSample, uint8_t* aData, int aSize, bool* aGotFrame, DecodedData& aResults);
void OutputDelayedFrames();
/**


@ -1,268 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MediaCodecProxy.h"
#include <OMX_IVCommon.h>
#include <gui/Surface.h>
#include <ICrypto.h>
#include "GonkAudioDecoderManager.h"
#include "MediaDecoderReader.h"
#include "VideoUtils.h"
#include "nsTArray.h"
#include "mozilla/Logging.h"
#include "stagefright/MediaBuffer.h"
#include "stagefright/MetaData.h"
#include "stagefright/MediaErrors.h"
#include <stagefright/foundation/AMessage.h>
#include <stagefright/foundation/ALooper.h>
#include "media/openmax/OMX_Audio.h"
#include "MediaData.h"
#include "MediaInfo.h"
#define CODECCONFIG_TIMEOUT_US 10000LL
#define READ_OUTPUT_BUFFER_TIMEOUT_US 0LL
#include <android/log.h>
#define GADM_LOG(...) __android_log_print(ANDROID_LOG_DEBUG, "GonkAudioDecoderManager", __VA_ARGS__)
#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
using namespace android;
typedef android::MediaCodecProxy MediaCodecProxy;
namespace mozilla {
GonkAudioDecoderManager::GonkAudioDecoderManager(const AudioInfo& aConfig)
: mAudioChannels(aConfig.mChannels)
, mAudioRate(aConfig.mRate)
, mAudioProfile(aConfig.mProfile)
, mAudioCompactor(mAudioQueue)
{
MOZ_COUNT_CTOR(GonkAudioDecoderManager);
MOZ_ASSERT(mAudioChannels);
mCodecSpecificData = aConfig.mCodecSpecificConfig;
mMimeType = aConfig.mMimeType;
}
GonkAudioDecoderManager::~GonkAudioDecoderManager()
{
MOZ_COUNT_DTOR(GonkAudioDecoderManager);
}
RefPtr<MediaDataDecoder::InitPromise>
GonkAudioDecoderManager::Init()
{
if (InitMediaCodecProxy()) {
return InitPromise::CreateAndResolve(TrackType::kAudioTrack, __func__);
} else {
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}
}
bool
GonkAudioDecoderManager::InitMediaCodecProxy()
{
status_t rv = OK;
if (!InitLoopers(MediaData::AUDIO_DATA)) {
return false;
}
mDecoder = MediaCodecProxy::CreateByType(mDecodeLooper, mMimeType.get(), false);
if (!mDecoder.get()) {
return false;
}
if (!mDecoder->AllocateAudioMediaCodec())
{
mDecoder = nullptr;
return false;
}
sp<AMessage> format = new AMessage;
// Fixed values
GADM_LOG("Configure audio mime type:%s, chan no:%d, sample-rate:%d, profile:%d",
mMimeType.get(), mAudioChannels, mAudioRate, mAudioProfile);
format->setString("mime", mMimeType.get());
format->setInt32("channel-count", mAudioChannels);
format->setInt32("sample-rate", mAudioRate);
format->setInt32("aac-profile", mAudioProfile);
status_t err = mDecoder->configure(format, nullptr, nullptr, 0);
if (err != OK || !mDecoder->Prepare()) {
return false;
}
if (mMimeType.EqualsLiteral("audio/mp4a-latm")) {
rv = mDecoder->Input(mCodecSpecificData->Elements(), mCodecSpecificData->Length(), 0,
android::MediaCodec::BUFFER_FLAG_CODECCONFIG,
CODECCONFIG_TIMEOUT_US);
}
if (rv == OK) {
return true;
} else {
GADM_LOG("Failed to input codec specific data!");
return false;
}
}
nsresult
GonkAudioDecoderManager::CreateAudioData(MediaBuffer* aBuffer, int64_t aStreamOffset)
{
if (!(aBuffer != nullptr && aBuffer->data() != nullptr)) {
GADM_LOG("Audio Buffer is not valid!");
return NS_ERROR_UNEXPECTED;
}
int64_t timeUs;
if (!aBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
return NS_ERROR_UNEXPECTED;
}
if (aBuffer->range_length() == 0) {
// Some decoders may return spurious empty buffers that we just want to ignore
// quoted from Android's AwesomePlayer.cpp
return NS_ERROR_NOT_AVAILABLE;
}
if (mLastTime > timeUs) {
GADM_LOG("Output decoded sample time is revert. time=%lld", timeUs);
MOZ_ASSERT(false);
return NS_ERROR_NOT_AVAILABLE;
}
mLastTime = timeUs;
const uint8_t *data = static_cast<const uint8_t*>(aBuffer->data());
size_t dataOffset = aBuffer->range_offset();
size_t size = aBuffer->range_length();
uint32_t frames = size / (2 * mAudioChannels);
CheckedInt64 duration = FramesToUsecs(frames, mAudioRate);
if (!duration.isValid()) {
return NS_ERROR_UNEXPECTED;
}
typedef AudioCompactor::NativeCopy OmxCopy;
mAudioCompactor.Push(aStreamOffset,
timeUs,
mAudioRate,
frames,
mAudioChannels,
OmxCopy(data+dataOffset,
size,
mAudioChannels));
return NS_OK;
}
nsresult
GonkAudioDecoderManager::Output(int64_t aStreamOffset,
RefPtr<MediaData>& aOutData)
{
aOutData = nullptr;
if (mAudioQueue.GetSize() > 0) {
aOutData = mAudioQueue.PopFront();
return mAudioQueue.AtEndOfStream() ? NS_ERROR_ABORT : NS_OK;
}
status_t err;
MediaBuffer* audioBuffer = nullptr;
err = mDecoder->Output(&audioBuffer, READ_OUTPUT_BUFFER_TIMEOUT_US);
AutoReleaseMediaBuffer a(audioBuffer, mDecoder.get());
switch (err) {
case OK:
{
nsresult rv = CreateAudioData(audioBuffer, aStreamOffset);
NS_ENSURE_SUCCESS(rv, rv);
break;
}
case android::INFO_FORMAT_CHANGED:
{
// If the format changed, update our cached info.
GADM_LOG("Decoder format changed");
sp<AMessage> audioCodecFormat;
if (mDecoder->getOutputFormat(&audioCodecFormat) != OK ||
audioCodecFormat == nullptr) {
return NS_ERROR_UNEXPECTED;
}
int32_t codec_channel_count = 0;
int32_t codec_sample_rate = 0;
if (!audioCodecFormat->findInt32("channel-count", &codec_channel_count) ||
!audioCodecFormat->findInt32("sample-rate", &codec_sample_rate)) {
return NS_ERROR_UNEXPECTED;
}
// Update AudioInfo
AudioConfig::ChannelLayout layout(codec_channel_count);
if (!layout.IsValid()) {
return NS_ERROR_FAILURE;
}
mAudioChannels = codec_channel_count;
mAudioRate = codec_sample_rate;
return Output(aStreamOffset, aOutData);
}
case android::INFO_OUTPUT_BUFFERS_CHANGED:
{
GADM_LOG("Info Output Buffers Changed");
if (mDecoder->UpdateOutputBuffers()) {
return Output(aStreamOffset, aOutData);
}
return NS_ERROR_FAILURE;
}
case -EAGAIN:
{
return NS_ERROR_NOT_AVAILABLE;
}
case android::ERROR_END_OF_STREAM:
{
GADM_LOG("Got EOS frame!");
nsresult rv = CreateAudioData(audioBuffer, aStreamOffset);
NS_ENSURE_SUCCESS(rv, NS_ERROR_ABORT);
MOZ_ASSERT(mAudioQueue.GetSize() > 0);
mAudioQueue.Finish();
break;
}
case -ETIMEDOUT:
{
GADM_LOG("Timeout. can try again next time");
return NS_ERROR_UNEXPECTED;
}
default:
{
GADM_LOG("Decoder failed, err=%d", err);
return NS_ERROR_UNEXPECTED;
}
}
if (mAudioQueue.GetSize() > 0) {
aOutData = mAudioQueue.PopFront();
// Return NS_ERROR_ABORT at the last sample.
return mAudioQueue.AtEndOfStream() ? NS_ERROR_ABORT : NS_OK;
}
return NS_ERROR_NOT_AVAILABLE;
}
void
GonkAudioDecoderManager::ProcessFlush()
{
GADM_LOG("FLUSH<<<");
mAudioQueue.Reset();
GADM_LOG(">>>FLUSH");
GonkDecoderManager::ProcessFlush();
}
void
GonkAudioDecoderManager::ResetEOS()
{
GADM_LOG("ResetEOS(<<<");
mAudioQueue.Reset();
GADM_LOG(">>>ResetEOS(");
GonkDecoderManager::ResetEOS();
}
} // namespace mozilla


@ -1,59 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined(GonkAudioDecoderManager_h_)
#define GonkAudioDecoderManager_h_
#include "AudioCompactor.h"
#include "mozilla/RefPtr.h"
#include "GonkMediaDataDecoder.h"
using namespace android;
namespace android {
class MOZ_EXPORT MediaBuffer;
} // namespace android
namespace mozilla {
class GonkAudioDecoderManager : public GonkDecoderManager {
typedef android::MediaCodecProxy MediaCodecProxy;
public:
GonkAudioDecoderManager(const AudioInfo& aConfig);
virtual ~GonkAudioDecoderManager();
RefPtr<InitPromise> Init() override;
nsresult Output(int64_t aStreamOffset,
RefPtr<MediaData>& aOutput) override;
void ProcessFlush() override;
virtual void ResetEOS() override;
const char* GetDescriptionName() const override
{
return "gonk audio decoder";
}
private:
bool InitMediaCodecProxy();
nsresult CreateAudioData(MediaBuffer* aBuffer, int64_t aStreamOffset);
uint32_t mAudioChannels;
uint32_t mAudioRate;
const uint32_t mAudioProfile;
MediaQueue<AudioData> mAudioQueue;
AudioCompactor mAudioCompactor;
};
} // namespace mozilla
#endif // GonkAudioDecoderManager_h_


@@ -1,63 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "GonkDecoderModule.h"
#include "GonkVideoDecoderManager.h"
#include "GonkAudioDecoderManager.h"
#include "mozilla/DebugOnly.h"
#include "GonkMediaDataDecoder.h"
namespace mozilla {
GonkDecoderModule::GonkDecoderModule()
{
}
GonkDecoderModule::~GonkDecoderModule()
{
}
already_AddRefed<MediaDataDecoder>
GonkDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
{
RefPtr<MediaDataDecoder> decoder =
new GonkMediaDataDecoder(new GonkVideoDecoderManager(aParams.mImageContainer, aParams.VideoConfig()),
aParams.mCallback);
return decoder.forget();
}
already_AddRefed<MediaDataDecoder>
GonkDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
{
RefPtr<MediaDataDecoder> decoder =
new GonkMediaDataDecoder(new GonkAudioDecoderManager(aParams.AudioConfig()),
aParams.mCallback);
return decoder.forget();
}
PlatformDecoderModule::ConversionRequired
GonkDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
{
if (aConfig.IsVideo()) {
return ConversionRequired::kNeedAnnexB;
} else {
return ConversionRequired::kNeedNone;
}
}
bool
GonkDecoderModule::SupportsMimeType(const nsACString& aMimeType,
DecoderDoctorDiagnostics* aDiagnostics) const
{
return aMimeType.EqualsLiteral("audio/mp4a-latm") ||
aMimeType.EqualsLiteral("audio/3gpp") ||
aMimeType.EqualsLiteral("audio/amr-wb") ||
aMimeType.EqualsLiteral("audio/mpeg") ||
aMimeType.EqualsLiteral("video/mp4") ||
aMimeType.EqualsLiteral("video/mp4v-es") ||
aMimeType.EqualsLiteral("video/avc") ||
aMimeType.EqualsLiteral("video/3gpp");
}
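// Illustrative sketch (not part of the original file): how the module's entry
// points compose. CreateGonkAudioDecoder() is a hypothetical helper; in Gecko
// the PDM machinery performs the equivalent steps.
static already_AddRefed<MediaDataDecoder>
CreateGonkAudioDecoder(const CreateDecoderParams& aParams)
{
  RefPtr<GonkDecoderModule> module = new GonkDecoderModule();
  // E.g. "audio/mp4a-latm" (AAC) is accepted; "audio/flac" is not.
  if (!module->SupportsMimeType(aParams.AudioConfig().mMimeType, nullptr)) {
    return nullptr;
  }
  return module->CreateAudioDecoder(aParams);
}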
} // namespace mozilla


@@ -1,37 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined(GonkPlatformDecoderModule_h_)
#define GonkPlatformDecoderModule_h_
#include "PlatformDecoderModule.h"
namespace mozilla {
class GonkDecoderModule : public PlatformDecoderModule {
public:
GonkDecoderModule();
virtual ~GonkDecoderModule();
// Decode thread.
already_AddRefed<MediaDataDecoder>
CreateVideoDecoder(const CreateDecoderParams& aParams) override;
// Decode thread.
already_AddRefed<MediaDataDecoder>
CreateAudioDecoder(const CreateDecoderParams& aParams) override;
ConversionRequired
DecoderNeedsConversion(const TrackInfo& aConfig) const override;
bool SupportsMimeType(const nsACString& aMimeType,
DecoderDoctorDiagnostics* aDiagnostics) const override;
};
} // namespace mozilla
#endif


@@ -1,385 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "GonkMediaDataDecoder.h"
#include "VideoUtils.h"
#include "nsTArray.h"
#include "MediaCodecProxy.h"
#include <stagefright/foundation/ADebug.h>
#include "mozilla/Logging.h"
#include <android/log.h>
#define GMDD_LOG(...) __android_log_print(ANDROID_LOG_DEBUG, "GonkMediaDataDecoder", __VA_ARGS__)
#define INPUT_TIMEOUT_US 0LL // Don't wait for buffer if none is available.
#define MIN_QUEUED_SAMPLES 2
#ifdef DEBUG
#include <utils/AndroidThreads.h>
#endif
#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
using namespace android;
namespace mozilla {
bool
GonkDecoderManager::InitLoopers(MediaData::Type aType)
{
MOZ_ASSERT(mDecodeLooper.get() == nullptr && mTaskLooper.get() == nullptr);
MOZ_ASSERT(aType == MediaData::VIDEO_DATA || aType == MediaData::AUDIO_DATA);
const char* suffix = (aType == MediaData::VIDEO_DATA ? "video" : "audio");
mDecodeLooper = new ALooper;
android::AString name("MediaCodecProxy/");
name.append(suffix);
mDecodeLooper->setName(name.c_str());
mTaskLooper = new ALooper;
name.setTo("GonkDecoderManager/");
name.append(suffix);
mTaskLooper->setName(name.c_str());
mTaskLooper->registerHandler(this);
#ifdef DEBUG
sp<AMessage> findThreadId(new AMessage(kNotifyFindLooperId, id()));
findThreadId->post();
#endif
return mDecodeLooper->start() == OK && mTaskLooper->start() == OK;
}
nsresult
GonkDecoderManager::Input(MediaRawData* aSample)
{
RefPtr<MediaRawData> sample;
if (aSample) {
sample = aSample;
} else {
// A null sample means EOS; queue an empty sample as the marker.
sample = new MediaRawData();
}
{
MutexAutoLock lock(mMutex);
mQueuedSamples.AppendElement(sample);
}
sp<AMessage> input = new AMessage(kNotifyProcessInput, id());
if (!aSample) {
input->setInt32("input-eos", 1);
}
input->post();
return NS_OK;
}
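// Illustrative sketch (not part of the original file): the EOS convention
// used by Input(). Passing nullptr queues an empty MediaRawData and tags the
// posted message with "input-eos"; GonkMediaDataDecoder::Drain() relies on
// exactly this. FeedAndDrain() is a hypothetical helper name.
static void
FeedAndDrain(GonkDecoderManager* aManager,
             const nsTArray<RefPtr<MediaRawData>>& aSamples)
{
  for (const auto& sample : aSamples) {
    aManager->Input(sample); // Queued; submitted asynchronously on mTaskLooper.
  }
  aManager->Input(nullptr);  // EOS marker: empty sample + "input-eos" flag.
}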
int32_t
GonkDecoderManager::ProcessQueuedSamples()
{
MOZ_ASSERT(OnTaskLooper());
MutexAutoLock lock(mMutex);
status_t rv;
while (mQueuedSamples.Length()) {
RefPtr<MediaRawData> data = mQueuedSamples.ElementAt(0);
rv = mDecoder->Input(reinterpret_cast<const uint8_t*>(data->Data()),
data->Size(),
data->mTime,
0,
INPUT_TIMEOUT_US);
if (rv == OK) {
mQueuedSamples.RemoveElementAt(0);
mWaitOutput.AppendElement(WaitOutputInfo(data->mOffset, data->mTime,
/* eos */ data->Data() == nullptr));
} else if (rv == -EAGAIN || rv == -ETIMEDOUT) {
// -EAGAIN or -ETIMEDOUT is usually benign: OMX simply couldn't provide an
// input buffer in time, so leave the sample queued and retry later.
break;
} else {
return rv;
}
}
return mQueuedSamples.Length();
}
nsresult
GonkDecoderManager::Flush()
{
if (mDecoder == nullptr) {
GMDD_LOG("Decoder is not initialized");
return NS_ERROR_UNEXPECTED;
}
if (!mInitPromise.IsEmpty()) {
return NS_OK;
}
{
MutexAutoLock lock(mMutex);
mQueuedSamples.Clear();
}
MonitorAutoLock lock(mFlushMonitor);
mIsFlushing = true;
sp<AMessage> flush = new AMessage(kNotifyProcessFlush, id());
flush->post();
while (mIsFlushing) {
lock.Wait();
}
return NS_OK;
}
nsresult
GonkDecoderManager::Shutdown()
{
if (mDecoder.get()) {
mDecoder->stop();
mDecoder->ReleaseMediaResources();
mDecoder = nullptr;
}
mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
return NS_OK;
}
size_t
GonkDecoderManager::NumQueuedSamples()
{
MutexAutoLock lock(mMutex);
return mQueuedSamples.Length();
}
void
GonkDecoderManager::ProcessInput(bool aEndOfStream)
{
MOZ_ASSERT(OnTaskLooper());
status_t rv = ProcessQueuedSamples();
if (rv >= 0) {
if (!aEndOfStream && rv <= MIN_QUEUED_SAMPLES) {
mDecodeCallback->InputExhausted();
}
if (mToDo.get() == nullptr) {
mToDo = new AMessage(kNotifyDecoderActivity, id());
if (aEndOfStream) {
mToDo->setInt32("input-eos", 1);
}
mDecoder->requestActivityNotification(mToDo);
} else if (aEndOfStream) {
mToDo->setInt32("input-eos", 1);
}
} else {
GMDD_LOG("input processed: error#%d", rv);
mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__));
}
}
void
GonkDecoderManager::ProcessFlush()
{
MOZ_ASSERT(OnTaskLooper());
mLastTime = INT64_MIN;
MonitorAutoLock lock(mFlushMonitor);
mWaitOutput.Clear();
if (mDecoder->flush() != OK) {
GMDD_LOG("flush error");
mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__));
}
mIsFlushing = false;
lock.NotifyAll();
}
// Use output timestamp to determine which output buffer is already returned
// and remove corresponding info, except for EOS, from the waiting list.
// This method handles the case where the audio decoder sends multiple output
// buffers for one input.
void
GonkDecoderManager::UpdateWaitingList(int64_t aForgetUpTo)
{
MOZ_ASSERT(OnTaskLooper());
size_t i;
for (i = 0; i < mWaitOutput.Length(); i++) {
const auto& item = mWaitOutput.ElementAt(i);
if (item.mEOS || item.mTimestamp > aForgetUpTo) {
break;
}
}
if (i > 0) {
mWaitOutput.RemoveElementsAt(0, i);
}
}
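// Worked example (illustrative): with mWaitOutput timestamps
// [1000, 2000, 3000(EOS)] and aForgetUpTo == 2500, the loop above stops at
// the EOS entry, so i == 2 and the 1000/2000 entries are removed. The EOS
// entry always survives here; ProcessToDo() removes it when Output() returns
// NS_ERROR_ABORT.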
void
GonkDecoderManager::ProcessToDo(bool aEndOfStream)
{
MOZ_ASSERT(OnTaskLooper());
MOZ_ASSERT(mToDo.get() != nullptr);
mToDo.clear();
if (NumQueuedSamples() > 0 && ProcessQueuedSamples() < 0) {
mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__));
return;
}
while (mWaitOutput.Length() > 0) {
RefPtr<MediaData> output;
WaitOutputInfo wait = mWaitOutput.ElementAt(0);
nsresult rv = Output(wait.mOffset, output);
if (rv == NS_OK) {
MOZ_ASSERT(output);
mDecodeCallback->Output(output);
UpdateWaitingList(output->mTime);
} else if (rv == NS_ERROR_ABORT) {
// EOS
MOZ_ASSERT(mQueuedSamples.IsEmpty());
if (output) {
mDecodeCallback->Output(output);
UpdateWaitingList(output->mTime);
}
MOZ_ASSERT(mWaitOutput.Length() == 1);
mWaitOutput.RemoveElementAt(0);
mDecodeCallback->DrainComplete();
ResetEOS();
return;
} else if (rv == NS_ERROR_NOT_AVAILABLE) {
break;
} else {
mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__));
return;
}
}
if (!aEndOfStream && NumQueuedSamples() <= MIN_QUEUED_SAMPLES) {
mDecodeCallback->InputExhausted();
// No need to schedule the todo task this time because InputExhausted() will
// cause Input() to be invoked and do it for us.
return;
}
if (NumQueuedSamples() || mWaitOutput.Length() > 0) {
mToDo = new AMessage(kNotifyDecoderActivity, id());
if (aEndOfStream) {
mToDo->setInt32("input-eos", 1);
}
mDecoder->requestActivityNotification(mToDo);
}
}
void
GonkDecoderManager::ResetEOS()
{
// After EOS, android::MediaCodec must be flushed before it can accept more input.
mWaitOutput.Clear();
if (mDecoder->flush() != OK) {
GMDD_LOG("flush error");
mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__));
}
}
void
GonkDecoderManager::onMessageReceived(const sp<AMessage> &aMessage)
{
switch (aMessage->what()) {
case kNotifyProcessInput:
{
int32_t eos = 0;
ProcessInput(aMessage->findInt32("input-eos", &eos) && eos);
break;
}
case kNotifyProcessFlush:
{
ProcessFlush();
break;
}
case kNotifyDecoderActivity:
{
int32_t eos = 0;
ProcessToDo(aMessage->findInt32("input-eos", &eos) && eos);
break;
}
#ifdef DEBUG
case kNotifyFindLooperId:
{
mTaskLooperId = androidGetThreadId();
MOZ_ASSERT(mTaskLooperId);
break;
}
#endif
default:
{
TRESPASS();
break;
}
}
}
#ifdef DEBUG
bool
GonkDecoderManager::OnTaskLooper()
{
return androidGetThreadId() == mTaskLooperId;
}
#endif
GonkMediaDataDecoder::GonkMediaDataDecoder(GonkDecoderManager* aManager,
MediaDataDecoderCallback* aCallback)
: mManager(aManager)
{
MOZ_COUNT_CTOR(GonkMediaDataDecoder);
mManager->SetDecodeCallback(aCallback);
}
GonkMediaDataDecoder::~GonkMediaDataDecoder()
{
MOZ_COUNT_DTOR(GonkMediaDataDecoder);
}
RefPtr<MediaDataDecoder::InitPromise>
GonkMediaDataDecoder::Init()
{
return mManager->Init();
}
void
GonkMediaDataDecoder::Shutdown()
{
mManager->Shutdown();
// The codec-allocating runnable and the init promise live on the reader
// TaskQueue, so the manager must be destroyed there to prevent racing.
mManager = nullptr;
}
// Inserts data into the decoder's pipeline.
void
GonkMediaDataDecoder::Input(MediaRawData* aSample)
{
mManager->Input(aSample);
}
void
GonkMediaDataDecoder::Flush()
{
mManager->Flush();
}
void
GonkMediaDataDecoder::Drain()
{
mManager->Input(nullptr);
}
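// Illustrative sketch (not part of the original file): the expected call
// order on this decoder from the reader side. DecodeToEnd() is a
// hypothetical helper; a real caller chains on the init promise and feeds
// further input from its MediaDataDecoderCallback.
static void
DecodeToEnd(GonkMediaDataDecoder* aDecoder,
            const nsTArray<RefPtr<MediaRawData>>& aSamples)
{
  // Resolves asynchronously once the codec is allocated and configured;
  // a real caller would ->Then() on |init| before feeding input.
  RefPtr<MediaDataDecoder::InitPromise> init = aDecoder->Init();
  for (const auto& sample : aSamples) {
    aDecoder->Input(sample); // Forwards to GonkDecoderManager::Input().
  }
  aDecoder->Drain();         // Same as Input(nullptr): queues the EOS marker.
  // ...wait for DrainComplete() on the callback, then:
  aDecoder->Shutdown();
}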
} // namespace mozilla


@@ -1,214 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined(GonkMediaDataDecoder_h_)
#define GonkMediaDataDecoder_h_
#include "PlatformDecoderModule.h"
#include <stagefright/foundation/AHandler.h>
namespace android {
struct ALooper;
class MediaBuffer;
class MediaCodecProxy;
} // namespace android
namespace mozilla {
class MediaRawData;
// Manages the data flow: feeds encoded input to the decoder and returns decoded output.
class GonkDecoderManager : public android::AHandler {
public:
typedef TrackInfo::TrackType TrackType;
typedef MediaDataDecoder::InitPromise InitPromise;
virtual ~GonkDecoderManager() {}
virtual RefPtr<InitPromise> Init() = 0;
virtual const char* GetDescriptionName() const = 0;
// Asynchronously sends a sample to mDecoder. If no input buffer is available,
// aSample is queued and re-sent later.
nsresult Input(MediaRawData* aSample);
// Flushes the queued samples and signals the decoder to discard all pending input/output.
nsresult Flush();
// Shuts down the decoder and rejects the init promise.
virtual nsresult Shutdown();
// Returns how many samples are waiting to be processed.
size_t NumQueuedSamples();
// Set callback for decoder events, such as requesting more input,
// returning output, or reporting error.
void SetDecodeCallback(MediaDataDecoderCallback* aCallback)
{
mDecodeCallback = aCallback;
}
protected:
GonkDecoderManager()
: mMutex("GonkDecoderManager")
, mLastTime(INT64_MIN)
, mFlushMonitor("GonkDecoderManager::Flush")
, mIsFlushing(false)
, mDecodeCallback(nullptr)
{}
bool InitLoopers(MediaData::Type aType);
void onMessageReceived(const android::sp<android::AMessage> &aMessage) override;
// Produces decoded output. It returns NS_OK on success, or NS_ERROR_NOT_AVAILABLE
// when output is not produced yet.
// If this returns a failure code other than NS_ERROR_NOT_AVAILABLE, an error
// will be reported through mDecodeCallback.
virtual nsresult Output(int64_t aStreamOffset,
RefPtr<MediaData>& aOutput) = 0;
// Sends queued samples to OMX. Returns how many samples remain queued after
// processing, or a negative error code on failure.
int32_t ProcessQueuedSamples();
void ProcessInput(bool aEndOfStream);
virtual void ProcessFlush();
void ProcessToDo(bool aEndOfStream);
virtual void ResetEOS();
RefPtr<MediaByteBuffer> mCodecSpecificData;
nsAutoCString mMimeType;
// Wrapper around MediaCodec that performs the decoding.
android::sp<android::MediaCodecProxy> mDecoder;
// Looper for mDecoder to run on.
android::sp<android::ALooper> mDecodeLooper;
// Looper to run decode tasks such as processing input, output, flush, and
// recycling output buffers.
android::sp<android::ALooper> mTaskLooper;
// Message codes for tasks running on mTaskLooper.
enum {
// Sent by the decoder to indicate an internal state change such as input or
// output buffer availability. Used to run pending input & output tasks.
kNotifyDecoderActivity = 'nda ',
// Signal the decoder to flush.
kNotifyProcessFlush = 'npf ',
// Used to process queued samples when there is new input.
kNotifyProcessInput = 'npi ',
#ifdef DEBUG
kNotifyFindLooperId = 'nfli',
#endif
};
MozPromiseHolder<InitPromise> mInitPromise;
Mutex mMutex; // Protects mQueuedSamples.
// A queue that stores the samples waiting to be sent to mDecoder.
// An empty element means EOS; no sample should be queued after it.
// Samples are queued on the caller's thread and dequeued on mTaskLooper.
nsTArray<RefPtr<MediaRawData>> mQueuedSamples;
// The last decoded frame presentation time. Only accessed on mTaskLooper.
int64_t mLastTime;
Monitor mFlushMonitor; // Waits for flushing to complete.
bool mIsFlushing; // Protected by mFlushMonitor.
// Remembers the notification that is currently waiting for a decoder event,
// to avoid requesting more than one notification at a time, which mDecoder
// forbids.
android::sp<android::AMessage> mToDo;
// Stores sample info for output buffer processing later.
struct WaitOutputInfo {
WaitOutputInfo(int64_t aOffset, int64_t aTimestamp, bool aEOS)
: mOffset(aOffset)
, mTimestamp(aTimestamp)
, mEOS(aEOS)
{}
const int64_t mOffset;
const int64_t mTimestamp;
const bool mEOS;
};
nsTArray<WaitOutputInfo> mWaitOutput;
MediaDataDecoderCallback* mDecodeCallback; // Reports decoder output or error.
private:
void UpdateWaitingList(int64_t aForgetUpTo);
#ifdef DEBUG
typedef void* LooperId;
bool OnTaskLooper();
LooperId mTaskLooperId;
#endif
};
class AutoReleaseMediaBuffer
{
public:
AutoReleaseMediaBuffer(android::MediaBuffer* aBuffer, android::MediaCodecProxy* aCodec)
: mBuffer(aBuffer)
, mCodec(aCodec)
{}
~AutoReleaseMediaBuffer()
{
MOZ_ASSERT(mCodec.get());
if (mBuffer) {
mCodec->ReleaseMediaBuffer(mBuffer);
}
}
android::MediaBuffer* forget()
{
android::MediaBuffer* tmp = mBuffer;
mBuffer = nullptr;
return tmp;
}
private:
android::MediaBuffer* mBuffer;
android::sp<android::MediaCodecProxy> mCodec;
};
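// Illustrative sketch (not part of the original file): typical use of the
// RAII guard above. The buffer is returned to the codec when the guard goes
// out of scope unless ownership is transferred with forget(), as the video
// path does when a recycle callback will release the buffer later.
inline void
ExampleOutputStep(android::MediaCodecProxy* aCodec, int64_t aTimeoutUs)
{
  android::MediaBuffer* buffer = nullptr;
  if (aCodec->Output(&buffer, aTimeoutUs) != android::OK) {
    return; // Nothing dequeued, so nothing to release.
  }
  AutoReleaseMediaBuffer autoRelease(buffer, aCodec);
  // ...consume the buffer here...
  // autoRelease returns |buffer| to |aCodec| on scope exit.
}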
// Samples are decoded by the GonkDecoder (MediaCodec) created by the
// GonkDecoderManager. This class implements the higher-level logic that maps
// the Gonk decoder onto the async MediaDataDecoder interface. The specifics
// of decoding each stream type are handled by GonkDecoderManager and the
// GonkDecoder it creates.
class GonkMediaDataDecoder : public MediaDataDecoder {
public:
GonkMediaDataDecoder(GonkDecoderManager* aDecoderManager,
MediaDataDecoderCallback* aCallback);
~GonkMediaDataDecoder();
RefPtr<InitPromise> Init() override;
void Input(MediaRawData* aSample) override;
void Flush() override;
void Drain() override;
void Shutdown() override;
const char* GetDescriptionName() const override
{
return "gonk decoder";
}
private:
android::sp<GonkDecoderManager> mManager;
};
} // namespace mozilla
#endif // GonkMediaDataDecoder_h_


@@ -1,772 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MediaCodecProxy.h"
#include <OMX_IVCommon.h>
#include <gui/Surface.h>
#include <ICrypto.h>
#include "GonkVideoDecoderManager.h"
#include "GrallocImages.h"
#include "MediaDecoderReader.h"
#include "ImageContainer.h"
#include "VideoUtils.h"
#include "nsThreadUtils.h"
#include "Layers.h"
#include "mozilla/Logging.h"
#include <stagefright/MediaBuffer.h>
#include <stagefright/MetaData.h>
#include <stagefright/MediaErrors.h>
#include <stagefright/foundation/AString.h>
#include "GonkNativeWindow.h"
#include "mozilla/layers/GrallocTextureClient.h"
#include "mozilla/layers/ImageBridgeChild.h"
#include "mozilla/layers/TextureClient.h"
#include "mozilla/layers/TextureClientRecycleAllocator.h"
#include <cutils/properties.h>
#define CODECCONFIG_TIMEOUT_US 10000LL
#define READ_OUTPUT_BUFFER_TIMEOUT_US 0LL
#include <android/log.h>
#define GVDM_LOG(...) __android_log_print(ANDROID_LOG_DEBUG, "GonkVideoDecoderManager", __VA_ARGS__)
#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
using namespace mozilla::layers;
using namespace android;
typedef android::MediaCodecProxy MediaCodecProxy;
namespace mozilla {
class GonkTextureClientAllocationHelper : public layers::ITextureClientAllocationHelper
{
public:
GonkTextureClientAllocationHelper(uint32_t aGrallocFormat,
gfx::IntSize aSize)
: ITextureClientAllocationHelper(gfx::SurfaceFormat::UNKNOWN,
aSize,
BackendSelector::Content,
TextureFlags::DEALLOCATE_CLIENT,
ALLOC_DISALLOW_BUFFERTEXTURECLIENT)
, mGrallocFormat(aGrallocFormat)
{}
already_AddRefed<TextureClient> Allocate(KnowsCompositor* aAllocator) override
{
uint32_t usage = android::GraphicBuffer::USAGE_SW_READ_OFTEN |
android::GraphicBuffer::USAGE_SW_WRITE_OFTEN |
android::GraphicBuffer::USAGE_HW_TEXTURE;
GrallocTextureData* texData = GrallocTextureData::Create(mSize, mGrallocFormat,
gfx::BackendType::NONE,
usage, aAllocator->GetTextureForwarder());
if (!texData) {
return nullptr;
}
sp<GraphicBuffer> graphicBuffer = texData->GetGraphicBuffer();
if (!graphicBuffer.get()) {
return nullptr;
}
RefPtr<TextureClient> textureClient =
TextureClient::CreateWithData(texData, TextureFlags::DEALLOCATE_CLIENT, aAllocator->GetTextureForwarder());
return textureClient.forget();
}
bool IsCompatible(TextureClient* aTextureClient) override
{
if (!aTextureClient) {
return false;
}
sp<GraphicBuffer> graphicBuffer =
static_cast<GrallocTextureData*>(aTextureClient->GetInternalData())->GetGraphicBuffer();
if (!graphicBuffer.get() ||
static_cast<uint32_t>(graphicBuffer->getPixelFormat()) != mGrallocFormat ||
aTextureClient->GetSize() != mSize) {
return false;
}
return true;
}
private:
uint32_t mGrallocFormat;
};
GonkVideoDecoderManager::GonkVideoDecoderManager(
mozilla::layers::ImageContainer* aImageContainer,
const VideoInfo& aConfig)
: mConfig(aConfig)
, mImageContainer(aImageContainer)
, mColorConverterBufferSize(0)
, mPendingReleaseItemsLock("GonkVideoDecoderManager::mPendingReleaseItemsLock")
, mNeedsCopyBuffer(false)
{
MOZ_COUNT_CTOR(GonkVideoDecoderManager);
}
GonkVideoDecoderManager::~GonkVideoDecoderManager()
{
MOZ_COUNT_DTOR(GonkVideoDecoderManager);
}
nsresult
GonkVideoDecoderManager::Shutdown()
{
mVideoCodecRequest.DisconnectIfExists();
return GonkDecoderManager::Shutdown();
}
RefPtr<MediaDataDecoder::InitPromise>
GonkVideoDecoderManager::Init()
{
mNeedsCopyBuffer = false;
uint32_t maxWidth, maxHeight;
char propValue[PROPERTY_VALUE_MAX];
property_get("ro.moz.omx.hw.max_width", propValue, "-1");
maxWidth = -1 == atoi(propValue) ? MAX_VIDEO_WIDTH : atoi(propValue);
property_get("ro.moz.omx.hw.max_height", propValue, "-1");
maxHeight = -1 == atoi(propValue) ? MAX_VIDEO_HEIGHT : atoi(propValue);
if (uint32_t(mConfig.mImage.width * mConfig.mImage.height) > maxWidth * maxHeight) {
GVDM_LOG("Video resolution exceeds hw codec capability");
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}
// Validate the container-reported frame and pictureRect sizes. This ensures
// that our video frame creation code doesn't overflow.
if (!IsValidVideoRegion(mConfig.mImage, mConfig.ImageRect(), mConfig.mDisplay)) {
GVDM_LOG("It is not a valid region");
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}
mReaderTaskQueue = AbstractThread::GetCurrent()->AsTaskQueue();
MOZ_ASSERT(mReaderTaskQueue);
if (mDecodeLooper.get() != nullptr) {
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}
if (!InitLoopers(MediaData::VIDEO_DATA)) {
return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
}
RefPtr<InitPromise> p = mInitPromise.Ensure(__func__);
android::sp<GonkVideoDecoderManager> self = this;
mDecoder = MediaCodecProxy::CreateByType(mDecodeLooper,
mConfig.mMimeType.get(),
false);
uint32_t capability = MediaCodecProxy::kEmptyCapability;
if (mDecoder->getCapability(&capability) == OK && (capability &
MediaCodecProxy::kCanExposeGraphicBuffer)) {
#if ANDROID_VERSION >= 21
sp<IGonkGraphicBufferConsumer> consumer;
GonkBufferQueue::createBufferQueue(&mGraphicBufferProducer, &consumer);
mNativeWindow = new GonkNativeWindow(consumer);
#else
mNativeWindow = new GonkNativeWindow();
#endif
}
mVideoCodecRequest.Begin(mDecoder->AsyncAllocateVideoMediaCodec()
->Then(mReaderTaskQueue, __func__,
[self] (bool) -> void {
self->mVideoCodecRequest.Complete();
self->codecReserved();
}, [self] (bool) -> void {
self->mVideoCodecRequest.Complete();
self->codecCanceled();
}));
return p;
}
nsresult
GonkVideoDecoderManager::CreateVideoData(MediaBuffer* aBuffer,
int64_t aStreamOffset,
VideoData **v)
{
*v = nullptr;
RefPtr<VideoData> data;
int64_t timeUs;
int32_t keyFrame;
if (aBuffer == nullptr) {
GVDM_LOG("Video Buffer is not valid!");
return NS_ERROR_UNEXPECTED;
}
AutoReleaseMediaBuffer autoRelease(aBuffer, mDecoder.get());
if (!aBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
GVDM_LOG("Decoder did not return frame time");
return NS_ERROR_UNEXPECTED;
}
if (mLastTime > timeUs) {
GVDM_LOG("Output decoded sample time is revert. time=%lld", timeUs);
return NS_ERROR_NOT_AVAILABLE;
}
mLastTime = timeUs;
if (aBuffer->range_length() == 0) {
// Some decoders may return spurious empty buffers that we just want to
// ignore (per Android's AwesomePlayer.cpp).
return NS_ERROR_NOT_AVAILABLE;
}
if (!aBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) {
keyFrame = 0;
}
gfx::IntRect picture =
mConfig.ScaledImageRect(mFrameInfo.mWidth, mFrameInfo.mHeight);
if (aBuffer->graphicBuffer().get()) {
data = CreateVideoDataFromGraphicBuffer(aBuffer, picture);
if (data && !mNeedsCopyBuffer) {
// RecycleCallback() will be responsible for releasing the buffer.
autoRelease.forget();
}
mNeedsCopyBuffer = false;
} else {
data = CreateVideoDataFromDataBuffer(aBuffer, picture);
}
if (!data) {
return NS_ERROR_UNEXPECTED;
}
// Fill necessary info.
data->mOffset = aStreamOffset;
data->mTime = timeUs;
data->mKeyframe = keyFrame;
data.forget(v);
return NS_OK;
}
// Copy pixels from one planar YUV to another.
static void
CopyYUV(PlanarYCbCrData& aSource, PlanarYCbCrData& aDestination)
{
// Fill Y plane.
uint8_t* srcY = aSource.mYChannel;
gfx::IntSize ySize = aSource.mYSize;
uint8_t* destY = aDestination.mYChannel;
// Y plane.
for (int i = 0; i < ySize.height; i++) {
memcpy(destY, srcY, ySize.width);
srcY += aSource.mYStride;
destY += aDestination.mYStride;
}
// Fill UV plane.
// Line start
uint8_t* srcU = aSource.mCbChannel;
uint8_t* srcV = aSource.mCrChannel;
uint8_t* destU = aDestination.mCbChannel;
uint8_t* destV = aDestination.mCrChannel;
gfx::IntSize uvSize = aSource.mCbCrSize;
for (int i = 0; i < uvSize.height; i++) {
uint8_t* su = srcU;
uint8_t* sv = srcV;
uint8_t* du = destU;
uint8_t* dv = destV;
for (int j = 0; j < uvSize.width; j++) {
*du++ = *su++;
*dv++ = *sv++;
// Move to next pixel.
su += aSource.mCbSkip;
sv += aSource.mCrSkip;
du += aDestination.mCbSkip;
dv += aDestination.mCrSkip;
}
// Move to next line.
srcU += aSource.mCbCrStride;
srcV += aSource.mCbCrStride;
destU += aDestination.mCbCrStride;
destV += aDestination.mCbCrStride;
}
}
inline static int
Align(int aX, int aAlign)
{
return (aX + aAlign - 1) & ~(aAlign - 1);
}
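// Worked examples (illustrative): Align(100, 16) == 112, Align(128, 128) == 128,
// Align(130, 128) == 256. aAlign must be a power of two for the mask trick
// above to be correct.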
// Venus formats are documented in kernel/include/media/msm_media_info.h:
// * Y_Stride : Width aligned to 128
// * UV_Stride : Width aligned to 128
// * Y_Scanlines: Height aligned to 32
// * UV_Scanlines: Height/2 aligned to 16
// * Total size = align((Y_Stride * Y_Scanlines
// * + UV_Stride * UV_Scanlines + 4096), 4096)
static void
CopyVenus(uint8_t* aSrc, uint8_t* aDest, uint32_t aWidth, uint32_t aHeight)
{
size_t yStride = Align(aWidth, 128);
uint8_t* s = aSrc;
uint8_t* d = aDest;
for (size_t i = 0; i < aHeight; i++) {
memcpy(d, s, aWidth);
s += yStride;
d += yStride;
}
size_t uvStride = yStride;
size_t uvLines = (aHeight + 1) / 2;
size_t ySize = yStride * Align(aHeight, 32);
s = aSrc + ySize;
d = aDest + ySize;
for (size_t i = 0; i < uvLines; i++) {
memcpy(d, s, aWidth);
s += uvStride;
d += uvStride;
}
}
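// Worked example (illustrative) for a 640x480 VENUS frame, per the layout
// documented above: yStride = Align(640, 128) = 640; the Y plane spans
// 640 * Align(480, 32) = 307200 bytes; CopyVenus() then copies
// (480 + 1) / 2 = 240 interleaved UV lines of 640 bytes each. The total
// allocation is align(307200 + 640 * Align(240, 16) + 4096, 4096) = 466944
// bytes.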
static void
CopyGraphicBuffer(sp<GraphicBuffer>& aSource, sp<GraphicBuffer>& aDestination)
{
void* srcPtr = nullptr;
aSource->lock(GraphicBuffer::USAGE_SW_READ_OFTEN, &srcPtr);
void* destPtr = nullptr;
aDestination->lock(GraphicBuffer::USAGE_SW_WRITE_OFTEN, &destPtr);
MOZ_ASSERT(srcPtr && destPtr);
// Build PlanarYCbCrData for source buffer.
PlanarYCbCrData srcData;
switch (aSource->getPixelFormat()) {
case HAL_PIXEL_FORMAT_YV12: {
// Android YV12 format is defined in system/core/include/system/graphics.h
srcData.mYChannel = static_cast<uint8_t*>(srcPtr);
srcData.mYSkip = 0;
srcData.mYSize.width = aSource->getWidth();
srcData.mYSize.height = aSource->getHeight();
srcData.mYStride = aSource->getStride();
// 4:2:0.
srcData.mCbCrSize.width = srcData.mYSize.width / 2;
srcData.mCbCrSize.height = srcData.mYSize.height / 2;
srcData.mCrChannel = srcData.mYChannel + (srcData.mYStride * srcData.mYSize.height);
// Aligned to 16 bytes boundary.
srcData.mCbCrStride = Align(srcData.mYStride / 2, 16);
srcData.mCrSkip = 0;
srcData.mCbChannel = srcData.mCrChannel + (srcData.mCbCrStride * srcData.mCbCrSize.height);
srcData.mCbSkip = 0;
// Build PlanarYCbCrData for destination buffer.
PlanarYCbCrData destData;
destData.mYChannel = static_cast<uint8_t*>(destPtr);
destData.mYSkip = 0;
destData.mYSize.width = aDestination->getWidth();
destData.mYSize.height = aDestination->getHeight();
destData.mYStride = aDestination->getStride();
// 4:2:0.
destData.mCbCrSize.width = destData.mYSize.width / 2;
destData.mCbCrSize.height = destData.mYSize.height / 2;
destData.mCrChannel = destData.mYChannel + (destData.mYStride * destData.mYSize.height);
// Aligned to 16 bytes boundary.
destData.mCbCrStride = Align(destData.mYStride / 2, 16);
destData.mCrSkip = 0;
destData.mCbChannel = destData.mCrChannel + (destData.mCbCrStride * destData.mCbCrSize.height);
destData.mCbSkip = 0;
CopyYUV(srcData, destData);
break;
}
case GrallocImage::HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS:
CopyVenus(static_cast<uint8_t*>(srcPtr),
static_cast<uint8_t*>(destPtr),
aSource->getWidth(),
aSource->getHeight());
break;
default:
NS_ERROR("Unsupported input gralloc image type. Should never be here.");
}
aSource->unlock();
aDestination->unlock();
}
already_AddRefed<VideoData>
GonkVideoDecoderManager::CreateVideoDataFromGraphicBuffer(MediaBuffer* aSource,
gfx::IntRect& aPicture)
{
sp<GraphicBuffer> srcBuffer(aSource->graphicBuffer());
RefPtr<TextureClient> textureClient;
if (mNeedsCopyBuffer) {
// Copy buffer contents for bug 1199809.
if (!mCopyAllocator) {
RefPtr<layers::ImageBridgeChild> bridge = layers::ImageBridgeChild::GetSingleton();
mCopyAllocator = new TextureClientRecycleAllocator(bridge);
}
if (!mCopyAllocator) {
GVDM_LOG("Create buffer allocator failed!");
return nullptr;
}
gfx::IntSize size(srcBuffer->getWidth(), srcBuffer->getHeight());
GonkTextureClientAllocationHelper helper(srcBuffer->getPixelFormat(), size);
textureClient = mCopyAllocator->CreateOrRecycle(helper);
if (!textureClient) {
GVDM_LOG("Copy buffer allocation failed!");
return nullptr;
}
sp<GraphicBuffer> destBuffer =
static_cast<GrallocTextureData*>(textureClient->GetInternalData())->GetGraphicBuffer();
CopyGraphicBuffer(srcBuffer, destBuffer);
} else {
textureClient = mNativeWindow->getTextureClientFromBuffer(srcBuffer.get());
textureClient->SetRecycleCallback(GonkVideoDecoderManager::RecycleCallback, this);
static_cast<GrallocTextureData*>(textureClient->GetInternalData())->SetMediaBuffer(aSource);
}
RefPtr<VideoData> data =
VideoData::CreateAndCopyIntoTextureClient(mConfig,
0, // Filled later by caller.
0, // Filled later by caller.
1, // No way to pass sample duration from muxer to
// OMX codec, so we hardcode the duration here.
textureClient,
false, // Filled later by caller.
-1,
aPicture);
return data.forget();
}
already_AddRefed<VideoData>
GonkVideoDecoderManager::CreateVideoDataFromDataBuffer(MediaBuffer* aSource, gfx::IntRect& aPicture)
{
if (!aSource->data()) {
GVDM_LOG("No data in Video Buffer!");
return nullptr;
}
uint8_t *yuv420p_buffer = (uint8_t *)aSource->data();
int32_t stride = mFrameInfo.mStride;
int32_t slice_height = mFrameInfo.mSliceHeight;
// Converts to OMX_COLOR_FormatYUV420Planar
if (mFrameInfo.mColorFormat != OMX_COLOR_FormatYUV420Planar) {
ARect crop;
crop.top = 0;
crop.bottom = mFrameInfo.mHeight;
crop.left = 0;
crop.right = mFrameInfo.mWidth;
yuv420p_buffer = GetColorConverterBuffer(mFrameInfo.mWidth, mFrameInfo.mHeight);
if (mColorConverter.convertDecoderOutputToI420(aSource->data(),
mFrameInfo.mWidth, mFrameInfo.mHeight, crop, yuv420p_buffer) != OK) {
GVDM_LOG("Color conversion failed!");
return nullptr;
}
stride = mFrameInfo.mWidth;
slice_height = mFrameInfo.mHeight;
}
size_t yuv420p_y_size = stride * slice_height;
size_t yuv420p_u_size = ((stride + 1) / 2) * ((slice_height + 1) / 2);
uint8_t *yuv420p_y = yuv420p_buffer;
uint8_t *yuv420p_u = yuv420p_y + yuv420p_y_size;
uint8_t *yuv420p_v = yuv420p_u + yuv420p_u_size;
VideoData::YCbCrBuffer b;
b.mPlanes[0].mData = yuv420p_y;
b.mPlanes[0].mWidth = mFrameInfo.mWidth;
b.mPlanes[0].mHeight = mFrameInfo.mHeight;
b.mPlanes[0].mStride = stride;
b.mPlanes[0].mOffset = 0;
b.mPlanes[0].mSkip = 0;
b.mPlanes[1].mData = yuv420p_u;
b.mPlanes[1].mWidth = (mFrameInfo.mWidth + 1) / 2;
b.mPlanes[1].mHeight = (mFrameInfo.mHeight + 1) / 2;
b.mPlanes[1].mStride = (stride + 1) / 2;
b.mPlanes[1].mOffset = 0;
b.mPlanes[1].mSkip = 0;
b.mPlanes[2].mData = yuv420p_v;
b.mPlanes[2].mWidth = (mFrameInfo.mWidth + 1) / 2;
b.mPlanes[2].mHeight = (mFrameInfo.mHeight + 1) / 2;
b.mPlanes[2].mStride = (stride + 1) / 2;
b.mPlanes[2].mOffset = 0;
b.mPlanes[2].mSkip = 0;
RefPtr<VideoData> data =
VideoData::CreateAndCopyData(mConfig,
mImageContainer,
0, // Filled later by caller.
0, // Filled later by caller.
1, // We don't know the duration.
b,
0, // Filled later by caller.
-1,
aPicture);
return data.forget();
}
bool
GonkVideoDecoderManager::SetVideoFormat()
{
// read video metadata from MediaCodec
sp<AMessage> codecFormat;
if (mDecoder->getOutputFormat(&codecFormat) == OK) {
AString mime;
int32_t width = 0;
int32_t height = 0;
int32_t stride = 0;
int32_t slice_height = 0;
int32_t color_format = 0;
int32_t crop_left = 0;
int32_t crop_top = 0;
int32_t crop_right = 0;
int32_t crop_bottom = 0;
if (!codecFormat->findString("mime", &mime) ||
!codecFormat->findInt32("width", &width) ||
!codecFormat->findInt32("height", &height) ||
!codecFormat->findInt32("stride", &stride) ||
!codecFormat->findInt32("slice-height", &slice_height) ||
!codecFormat->findInt32("color-format", &color_format) ||
!codecFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
GVDM_LOG("Failed to find values");
return false;
}
mFrameInfo.mWidth = width;
mFrameInfo.mHeight = height;
mFrameInfo.mStride = stride;
mFrameInfo.mSliceHeight = slice_height;
mFrameInfo.mColorFormat = color_format;
nsIntSize displaySize(width, height);
if (!IsValidVideoRegion(mConfig.mDisplay,
mConfig.ScaledImageRect(width, height),
displaySize)) {
GVDM_LOG("It is not a valid region");
return false;
}
return true;
}
GVDM_LOG("Fail to get output format");
return false;
}
// Blocks until a decoded sample is produced by the decoder.
nsresult
GonkVideoDecoderManager::Output(int64_t aStreamOffset,
RefPtr<MediaData>& aOutData)
{
aOutData = nullptr;
status_t err;
if (mDecoder == nullptr) {
GVDM_LOG("Decoder is not inited");
return NS_ERROR_UNEXPECTED;
}
MediaBuffer* outputBuffer = nullptr;
err = mDecoder->Output(&outputBuffer, READ_OUTPUT_BUFFER_TIMEOUT_US);
switch (err) {
case OK:
{
RefPtr<VideoData> data;
nsresult rv = CreateVideoData(outputBuffer, aStreamOffset, getter_AddRefs(data));
if (rv == NS_ERROR_NOT_AVAILABLE) {
// The decoder returned an empty video buffer; try again.
return NS_ERROR_NOT_AVAILABLE;
} else if (rv != NS_OK || data == nullptr) {
GVDM_LOG("Failed to create VideoData");
return NS_ERROR_UNEXPECTED;
}
aOutData = data;
return NS_OK;
}
case android::INFO_FORMAT_CHANGED:
{
// If the format changed, update our cached info.
GVDM_LOG("Decoder format changed");
if (!SetVideoFormat()) {
return NS_ERROR_UNEXPECTED;
}
return Output(aStreamOffset, aOutData);
}
case android::INFO_OUTPUT_BUFFERS_CHANGED:
{
if (mDecoder->UpdateOutputBuffers()) {
return Output(aStreamOffset, aOutData);
}
GVDM_LOG("Fails to update output buffers!");
return NS_ERROR_FAILURE;
}
case -EAGAIN:
{
// GVDM_LOG("Need to try again!");
return NS_ERROR_NOT_AVAILABLE;
}
case android::ERROR_END_OF_STREAM:
{
GVDM_LOG("Got the EOS frame!");
RefPtr<VideoData> data;
nsresult rv = CreateVideoData(outputBuffer, aStreamOffset, getter_AddRefs(data));
if (rv == NS_ERROR_NOT_AVAILABLE) {
// For EOS, there is nothing more to do.
return NS_ERROR_ABORT;
}
if (rv != NS_OK || data == nullptr) {
GVDM_LOG("Failed to create video data");
return NS_ERROR_UNEXPECTED;
}
aOutData = data;
return NS_ERROR_ABORT;
}
case -ETIMEDOUT:
{
GVDM_LOG("Timeout. can try again next time");
return NS_ERROR_UNEXPECTED;
}
default:
{
GVDM_LOG("Decoder failed, err=%d", err);
return NS_ERROR_UNEXPECTED;
}
}
return NS_OK;
}
void
GonkVideoDecoderManager::codecReserved()
{
if (mInitPromise.IsEmpty()) {
return;
}
GVDM_LOG("codecReserved");
sp<AMessage> format = new AMessage;
sp<Surface> surface;
status_t rv = OK;
// Fixed values
GVDM_LOG("Configure video mime type: %s, width:%d, height:%d", mConfig.mMimeType.get(), mConfig.mImage.width, mConfig.mImage.height);
format->setString("mime", mConfig.mMimeType.get());
format->setInt32("width", mConfig.mImage.width);
format->setInt32("height", mConfig.mImage.height);
// Set the "moz-use-undequeued-bufs" to use the undeque buffers to accelerate
// the video decoding.
format->setInt32("moz-use-undequeued-bufs", 1);
if (mNativeWindow != nullptr) {
#if ANDROID_VERSION >= 21
surface = new Surface(mGraphicBufferProducer);
#else
surface = new Surface(mNativeWindow->getBufferQueue());
#endif
}
mDecoder->configure(format, surface, nullptr, 0);
mDecoder->Prepare();
if (mConfig.mMimeType.EqualsLiteral("video/mp4v-es")) {
rv = mDecoder->Input(mConfig.mCodecSpecificConfig->Elements(),
mConfig.mCodecSpecificConfig->Length(), 0,
android::MediaCodec::BUFFER_FLAG_CODECCONFIG,
CODECCONFIG_TIMEOUT_US);
}
if (rv != OK) {
GVDM_LOG("Failed to configure codec!!!!");
mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
return;
}
mInitPromise.Resolve(TrackType::kVideoTrack, __func__);
}
void
GonkVideoDecoderManager::codecCanceled()
{
GVDM_LOG("codecCanceled");
mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
}
// Called on GonkDecoderManager::mTaskLooper thread.
void
GonkVideoDecoderManager::onMessageReceived(const sp<AMessage> &aMessage)
{
switch (aMessage->what()) {
case kNotifyPostReleaseBuffer:
{
ReleaseAllPendingVideoBuffers();
break;
}
default:
{
GonkDecoderManager::onMessageReceived(aMessage);
break;
}
}
}
uint8_t *
GonkVideoDecoderManager::GetColorConverterBuffer(int32_t aWidth, int32_t aHeight)
{
// Allocate a temporary YUV420Planar buffer.
size_t yuv420p_y_size = aWidth * aHeight;
size_t yuv420p_u_size = ((aWidth + 1) / 2) * ((aHeight + 1) / 2);
size_t yuv420p_v_size = yuv420p_u_size;
size_t yuv420p_size = yuv420p_y_size + yuv420p_u_size + yuv420p_v_size;
if (mColorConverterBufferSize != yuv420p_size) {
mColorConverterBuffer = MakeUnique<uint8_t[]>(yuv420p_size);
mColorConverterBufferSize = yuv420p_size;
}
return mColorConverterBuffer.get();
}
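// Worked example (illustrative): for a 1280x720 frame the cached I420 buffer
// is 1280 * 720 + 2 * (640 * 360) = 921600 + 460800 = 1382400 bytes. The
// buffer is reallocated only when this size changes between frames.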
/* static */
void
GonkVideoDecoderManager::RecycleCallback(TextureClient* aClient, void* aClosure)
{
MOZ_ASSERT(aClient && !aClient->IsDead());
GonkVideoDecoderManager* videoManager = static_cast<GonkVideoDecoderManager*>(aClosure);
GrallocTextureData* client = static_cast<GrallocTextureData*>(aClient->GetInternalData());
aClient->ClearRecycleCallback();
FenceHandle handle = aClient->GetAndResetReleaseFenceHandle();
videoManager->PostReleaseVideoBuffer(client->GetMediaBuffer(), handle);
}
void GonkVideoDecoderManager::PostReleaseVideoBuffer(
android::MediaBuffer *aBuffer,
FenceHandle aReleaseFence)
{
{
MutexAutoLock autoLock(mPendingReleaseItemsLock);
if (aBuffer) {
mPendingReleaseItems.AppendElement(ReleaseItem(aBuffer, aReleaseFence));
}
}
sp<AMessage> notify =
new AMessage(kNotifyPostReleaseBuffer, id());
notify->post();
}
void GonkVideoDecoderManager::ReleaseAllPendingVideoBuffers()
{
nsTArray<ReleaseItem> releasingItems;
{
MutexAutoLock autoLock(mPendingReleaseItemsLock);
releasingItems.AppendElements(mPendingReleaseItems);
mPendingReleaseItems.Clear();
}
// Free all pending video buffers without holding mPendingReleaseItemsLock.
size_t size = releasingItems.Length();
for (size_t i = 0; i < size; i++) {
RefPtr<FenceHandle::FdObj> fdObj = releasingItems[i].mReleaseFence.GetAndResetFdObj();
sp<Fence> fence = new Fence(fdObj->GetAndResetFd());
fence->waitForever("GonkVideoDecoderManager");
mDecoder->ReleaseMediaBuffer(releasingItems[i].mBuffer);
}
releasingItems.Clear();
}
} // namespace mozilla
