/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "VideoUtils.h"

#include "mozilla/Preferences.h"
#include "mozilla/Base64.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/Telemetry.h"

#include "MediaResource.h"
#include "TimeUnits.h"
#include "nsMathUtils.h"
#include "nsSize.h"
#include "VorbisUtils.h"
#include "ImageContainer.h"
#include "mozilla/SharedThreadPool.h"
#include "nsIRandomGenerator.h"
#include "nsIServiceManager.h"

#include <stdint.h>

namespace mozilla {

using layers::PlanarYCbCrImage;

// Converts from number of audio frames to microseconds, given the specified
// audio rate.
CheckedInt64 FramesToUsecs(int64_t aFrames, uint32_t aRate) {
  return (CheckedInt64(aFrames) * USECS_PER_S) / aRate;
}
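
// Example: FramesToUsecs(44100, 44100) yields a CheckedInt64 that isValid()
// and whose value() is 1000000 (one second in microseconds). Callers should
// test isValid() before using the result, since the multiplication by
// USECS_PER_S can overflow for very large frame counts.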

media::TimeUnit FramesToTimeUnit(int64_t aFrames, uint32_t aRate) {
  return (media::TimeUnit::FromMicroseconds(aFrames) * USECS_PER_S) / aRate;
}

// Converts from microseconds to number of audio frames, given the specified
// audio rate.
CheckedInt64 UsecsToFrames(int64_t aUsecs, uint32_t aRate) {
  return (CheckedInt64(aUsecs) * aRate) / USECS_PER_S;
}

// Converts a TimeUnit to a number of audio frames at the given rate.
CheckedInt64 TimeUnitToFrames(const media::TimeUnit& aTime, uint32_t aRate) {
  return UsecsToFrames(aTime.ToMicroseconds(), aRate);
}

nsresult SecondsToUsecs(double aSeconds, int64_t& aOutUsecs) {
  if (aSeconds * double(USECS_PER_S) > INT64_MAX) {
    return NS_ERROR_FAILURE;
  }
  aOutUsecs = int64_t(aSeconds * double(USECS_PER_S));
  return NS_OK;
}
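
// Example: SecondsToUsecs(1.5, out) sets out to 1500000 and returns NS_OK.
// A value such as 1.0e13 seconds (or +Infinity) exceeds INT64_MAX
// microseconds, so the guard above returns NS_ERROR_FAILURE instead.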

static int32_t ConditionDimension(float aValue)
{
  // This will exclude NaNs and too-big values.
  if (aValue > 1.0 && aValue <= INT32_MAX)
    return int32_t(NS_round(aValue));
  return 0;
}

void ScaleDisplayByAspectRatio(nsIntSize& aDisplay, float aAspectRatio)
{
  if (aAspectRatio > 1.0) {
    // Increase the intrinsic width
    aDisplay.width = ConditionDimension(aAspectRatio * aDisplay.width);
  } else {
    // Increase the intrinsic height
    aDisplay.height = ConditionDimension(aDisplay.height / aAspectRatio);
  }
}
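
// Example: a 640x480 display with an aspect ratio of 1.5 becomes 960x480
// (the width grows), while an aspect ratio of 0.5 turns it into 640x960
// (the height grows). NaN or out-of-range results collapse to 0 via
// ConditionDimension.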

static int64_t BytesToTime(int64_t offset, int64_t length, int64_t durationUs) {
  NS_ASSERTION(length > 0, "Must have positive length");
  double r = double(offset) / double(length);
  if (r > 1.0)
    r = 1.0;
  return int64_t(double(durationUs) * r);
}
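
// Example: BytesToTime(500, 1000, 2000000) maps the halfway byte offset of a
// 1000-byte resource to 1000000us, i.e. half of the 2-second duration.
// Offsets beyond the resource length are clamped to the full duration.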

media::TimeIntervals GetEstimatedBufferedTimeRanges(mozilla::MediaResource* aStream,
                                                    int64_t aDurationUsecs)
{
  media::TimeIntervals buffered;
  // Nothing to cache if the media takes 0us to play.
  if (aDurationUsecs <= 0 || !aStream)
    return buffered;

  // Special case completely cached files. This also handles local files.
  if (aStream->IsDataCachedToEndOfResource(0)) {
    buffered +=
      media::TimeInterval(media::TimeUnit::FromMicroseconds(0),
                          media::TimeUnit::FromMicroseconds(aDurationUsecs));
    return buffered;
  }

  int64_t totalBytes = aStream->GetLength();

  // If we can't determine the total size, pretend that we have nothing
  // buffered. This will put us in a state of eternally-low-on-undecoded-data
  // which is not great, but about the best we can do.
  if (totalBytes <= 0)
    return buffered;

  int64_t startOffset = aStream->GetNextCachedData(0);
  while (startOffset >= 0) {
    int64_t endOffset = aStream->GetCachedDataEnd(startOffset);
    // Bytes [startOffset..endOffset] are cached.
    NS_ASSERTION(startOffset >= 0, "Integer underflow in GetBuffered");
    NS_ASSERTION(endOffset >= 0, "Integer underflow in GetBuffered");

    int64_t startUs = BytesToTime(startOffset, totalBytes, aDurationUsecs);
    int64_t endUs = BytesToTime(endOffset, totalBytes, aDurationUsecs);
    if (startUs != endUs) {
      buffered +=
        media::TimeInterval(media::TimeUnit::FromMicroseconds(startUs),
                            media::TimeUnit::FromMicroseconds(endUs));
    }
    startOffset = aStream->GetNextCachedData(endOffset);
  }
  return buffered;
}
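
// Example: for a 100-second (100,000,000us) stream of known length whose
// first half is cached, the loop above yields the single estimated interval
// [0s, 50s). The estimate assumes playback time is proportional to byte
// offset, i.e. a roughly constant byterate across the resource.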

int DownmixAudioToStereo(mozilla::AudioDataValue* buffer,
                         int channels, uint32_t frames)
{
  int outChannels = 2;
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
  // Downmix matrix. Per-row normalization 1 for rows 3,4 and 2 for rows 5-8.
  static const float dmatrix[6][8][2] = {
    /*3*/{{0.5858f,0},{0.4142f,0.4142f},{0, 0.5858f}},
    /*4*/{{0.4226f,0},{0, 0.4226f},{0.366f,0.2114f},{0.2114f,0.366f}},
    /*5*/{{0.6510f,0},{0.4600f,0.4600f},{0, 0.6510f},{0.5636f,0.3254f},{0.3254f,0.5636f}},
    /*6*/{{0.5290f,0},{0.3741f,0.3741f},{0, 0.5290f},{0.4582f,0.2645f},{0.2645f,0.4582f},{0.3741f,0.3741f}},
    /*7*/{{0.4553f,0},{0.3220f,0.3220f},{0, 0.4553f},{0.3943f,0.2277f},{0.2277f,0.3943f},{0.2788f,0.2788f},{0.3220f,0.3220f}},
    /*8*/{{0.3886f,0},{0.2748f,0.2748f},{0, 0.3886f},{0.3366f,0.1943f},{0.1943f,0.3366f},{0.3366f,0.1943f},{0.1943f,0.3366f},{0.2748f,0.2748f}},
  };
  // Re-write the buffer with downmixed data
  for (uint32_t i = 0; i < frames; i++) {
    float sampL = 0.0;
    float sampR = 0.0;
    for (int j = 0; j < channels; j++) {
      sampL += buffer[i * channels + j] * dmatrix[channels - 3][j][0];
      sampR += buffer[i * channels + j] * dmatrix[channels - 3][j][1];
    }
    buffer[i * outChannels] = sampL;
    buffer[i * outChannels + 1] = sampR;
  }
#else
  // Downmix matrix. Per-row normalization 1 for rows 3,4 and 2 for rows 5-8.
  // Coefficients in Q14.
  static const int16_t dmatrix[6][8][2] = {
    /*3*/{{9598, 0},{6786,6786},{0, 9598}},
    /*4*/{{6925, 0},{0, 6925},{5997,3462},{3462,5997}},
    /*5*/{{10663,0},{7540,7540},{0, 10663},{9234,5331},{5331,9234}},
    /*6*/{{8668, 0},{6129,6129},{0, 8668},{7507,4335},{4335,7507},{6129,6129}},
    /*7*/{{7459, 0},{5275,5275},{0, 7459},{6460,3731},{3731,6460},{4568,4568},{5275,5275}},
    /*8*/{{6368, 0},{4502,4502},{0, 6368},{5514,3184},{3184,5514},{5514,3184},{3184,5514},{4502,4502}}
  };
  // Re-write the buffer with downmixed data
  for (uint32_t i = 0; i < frames; i++) {
    int32_t sampL = 0;
    int32_t sampR = 0;
    for (int j = 0; j < channels; j++) {
      sampL += buffer[i * channels + j] * dmatrix[channels - 3][j][0];
      sampR += buffer[i * channels + j] * dmatrix[channels - 3][j][1];
    }
    sampL = (sampL + 8192) >> 14;
    buffer[i * outChannels] = static_cast<mozilla::AudioDataValue>(MOZ_CLIP_TO_15(sampL));
    sampR = (sampR + 8192) >> 14;
    buffer[i * outChannels + 1] = static_cast<mozilla::AudioDataValue>(MOZ_CLIP_TO_15(sampR));
  }
#endif
  return outChannels;
}
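
// Example: a 6-channel (5.1) interleaved buffer is folded to stereo in place
// using matrix row channels - 3 == 3; after the call only the first
// 2 * frames samples are meaningful and the function returns 2. The matrix
// only covers 3 to 8 input channels, so callers must not pass other counts.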

bool
IsVideoContentType(const nsCString& aContentType)
{
  NS_NAMED_LITERAL_CSTRING(video, "video");
  if (FindInReadable(video, aContentType)) {
    return true;
  }
  return false;
}
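
// Example: returns true for "video/webm" or "video/mp4"; any content type
// containing the substring "video" matches.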

bool
IsValidVideoRegion(const nsIntSize& aFrame, const nsIntRect& aPicture,
                   const nsIntSize& aDisplay)
{
  return
    aFrame.width <= PlanarYCbCrImage::MAX_DIMENSION &&
    aFrame.height <= PlanarYCbCrImage::MAX_DIMENSION &&
    aFrame.width * aFrame.height <= MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT &&
    aFrame.width * aFrame.height != 0 &&
    aPicture.width <= PlanarYCbCrImage::MAX_DIMENSION &&
    aPicture.x < PlanarYCbCrImage::MAX_DIMENSION &&
    aPicture.x + aPicture.width < PlanarYCbCrImage::MAX_DIMENSION &&
    aPicture.height <= PlanarYCbCrImage::MAX_DIMENSION &&
    aPicture.y < PlanarYCbCrImage::MAX_DIMENSION &&
    aPicture.y + aPicture.height < PlanarYCbCrImage::MAX_DIMENSION &&
    aPicture.width * aPicture.height <= MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT &&
    aPicture.width * aPicture.height != 0 &&
    aDisplay.width <= PlanarYCbCrImage::MAX_DIMENSION &&
    aDisplay.height <= PlanarYCbCrImage::MAX_DIMENSION &&
    aDisplay.width * aDisplay.height <= MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT &&
    aDisplay.width * aDisplay.height != 0;
}

already_AddRefed<SharedThreadPool> GetMediaThreadPool(MediaThreadType aType)
{
  const char* name;
  switch (aType) {
    case MediaThreadType::PLATFORM_DECODER:
      name = "MediaPDecoder";
      break;
    default:
      MOZ_ASSERT(false);
      // Fall through to the playback pool.
    case MediaThreadType::PLAYBACK:
      name = "MediaPlayback";
      break;
  }
  return SharedThreadPool::
    Get(nsDependentCString(name),
        Preferences::GetUint("media.num-decode-threads", 12));
}
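
// Illustrative use (hypothetical caller):
//   nsRefPtr<SharedThreadPool> pool =
//     GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER);
// SharedThreadPool::Get() hands out pools keyed by name, so repeated calls
// with the same MediaThreadType share one underlying pool.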

bool
ExtractH264CodecDetails(const nsAString& aCodec,
                        int16_t& aProfile,
                        int16_t& aLevel)
{
  // H.264 codec parameters take the form avcN.PPCCLL, where
  // N = avc type. avc3 is avcc with SPS & PPS implicit (within stream),
  // PP = profile_idc, CC = constraint_set flags, LL = level_idc.
  // We ignore the constraint_set flags, as it's not clear from any
  // documentation what constraints the platform decoders support.
  // See http://blog.pearce.org.nz/2013/11/what-does-h264avc1-codecs-parameters.html
  // for more details.
  if (aCodec.Length() != strlen("avc1.PPCCLL")) {
    return false;
  }

  // Verify the codec starts with "avc1." or "avc3.".
  const nsAString& sample = Substring(aCodec, 0, 5);
  if (!sample.EqualsASCII("avc1.") && !sample.EqualsASCII("avc3.")) {
    return false;
  }

  // Extract the profile_idc and level_idc.
  nsresult rv = NS_OK;
  aProfile = PromiseFlatString(Substring(aCodec, 5, 2)).ToInteger(&rv, 16);
  NS_ENSURE_SUCCESS(rv, false);

  aLevel = PromiseFlatString(Substring(aCodec, 9, 2)).ToInteger(&rv, 16);
  NS_ENSURE_SUCCESS(rv, false);

  if (aLevel == 9) {
    aLevel = H264_LEVEL_1_b;
  } else if (aLevel <= 5) {
    aLevel *= 10;
  }

  // Capture the constraint_set flag value for the purpose of Telemetry.
  // We don't NS_ENSURE_SUCCESS here because ExtractH264CodecDetails doesn't
  // care about this, but we make sure constraints is at least 4
  // (constraint_set5_flag), otherwise we collect 0 for unknown.
  uint8_t constraints = PromiseFlatString(Substring(aCodec, 7, 2)).ToInteger(&rv, 16);
  Telemetry::Accumulate(Telemetry::VIDEO_CANPLAYTYPE_H264_CONSTRAINT_SET_FLAG,
                        constraints >= 4 ? constraints : 0);

  // 244 is the highest meaningful profile value (High 4:4:4 Intra Profile)
  // that can be represented as a single hex byte; otherwise collect 0 for
  // unknown.
  Telemetry::Accumulate(Telemetry::VIDEO_CANPLAYTYPE_H264_PROFILE,
                        aProfile <= 244 ? aProfile : 0);

  // Make sure aLevel represents a value between levels 1 and 5.2,
  // otherwise collect 0 for unknown.
  Telemetry::Accumulate(Telemetry::VIDEO_CANPLAYTYPE_H264_LEVEL,
                        (aLevel >= 10 && aLevel <= 52) ? aLevel : 0);

  return true;
}
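
// Worked example: "avc1.42E01E" parses as profile_idc 0x42 == 66 (Baseline),
// constraint flags 0xE0 and level_idc 0x1E == 30, so aProfile is 66 and
// aLevel is 30 (H.264 level 3.0). "avc1.4D401F" likewise yields profile 77
// (Main) at level 31 (3.1).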

nsresult
GenerateRandomName(nsCString& aOutSalt, uint32_t aLength)
{
  nsresult rv;
  nsCOMPtr<nsIRandomGenerator> rg =
    do_GetService("@mozilla.org/security/random-generator;1", &rv);
  if (NS_FAILED(rv)) return rv;

  // For each three bytes of random data we will get four bytes of ASCII.
  const uint32_t requiredBytesLength =
    static_cast<uint32_t>((aLength + 3) / 4 * 3);

  uint8_t* buffer;
  rv = rg->GenerateRandomBytes(requiredBytesLength, &buffer);
  if (NS_FAILED(rv)) return rv;

  nsAutoCString temp;
  nsDependentCSubstring randomData(reinterpret_cast<const char*>(buffer),
                                   requiredBytesLength);
  rv = Base64Encode(randomData, temp);
  free(buffer);
  buffer = nullptr;
  if (NS_FAILED(rv)) return rv;

  aOutSalt = temp;
  return NS_OK;
}
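
// Example: with aLength == 16 this requests (16 + 3) / 4 * 3 == 12 random
// bytes, and Base64-encoding 12 bytes produces exactly 16 ASCII characters
// with no '=' padding. The result may still contain '+' or '/', which
// GenerateRandomPathName below replaces before use in file names.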

nsresult
GenerateRandomPathName(nsCString& aOutSalt, uint32_t aLength)
{
  nsresult rv = GenerateRandomName(aOutSalt, aLength);
  if (NS_FAILED(rv)) return rv;

  // Base64 characters are alphanumeric (a-zA-Z0-9) and '+' and '/', so we need
  // to replace illegal characters -- notably '/'.
  aOutSalt.ReplaceChar(FILE_PATH_SEPARATOR FILE_ILLEGAL_CHARACTERS, '_');
  return NS_OK;
}

already_AddRefed<TaskQueue>
CreateMediaDecodeTaskQueue()
{
  nsRefPtr<TaskQueue> queue = new TaskQueue(
    GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER));
  return queue.forget();
}

already_AddRefed<FlushableTaskQueue>
CreateFlushableMediaDecodeTaskQueue()
{
  nsRefPtr<FlushableTaskQueue> queue = new FlushableTaskQueue(
    GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER));
  return queue.forget();
}

} // end namespace mozilla