Mirror of https://github.com/mozilla/gecko-dev.git
Bug 856361. Part 8: Fix copying of track data from input streams to output streams in AudioNodeExternalInputStream. r=padenot
--HG-- extra : rebase_source : 344780d9c27ce3026ae5c12a08ea6a1ba3b4e924
This commit is contained in:
Parent: 8976bb22fd
Commit: 2d1ffcaf14
@@ -66,6 +66,9 @@ AudioNodeExternalInputStream::GetTrackMapEntry(const StreamBuffer::Track& aTrack
   }
 
   TrackMapEntry* map = mTrackMap.AppendElement();
+  map->mEndOfConsumedInputTicks = 0;
+  map->mEndOfLastInputIntervalInInputStream = -1;
+  map->mEndOfLastInputIntervalInOutputStream = -1;
   map->mSamplesPassedToResampler =
     TimeToTicksRoundUp(aTrack.GetRate(), GraphTimeToStreamTime(aFrom));
   map->mResampler = resampler;
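Note on the new fields initialized above: the 0 / -1 values are sentinels that later let ProduceOutput tell a freshly created track map entry apart from one that already has a previous unblocked interval behind it. A minimal stand-alone sketch of that idea; the struct and helper below are illustrative only (they mirror the member names from the patch but are not Gecko code):

```cpp
#include <cstdint>

typedef int64_t TrackTicks;   // sample counts at the track rate, as in the patch
typedef int64_t StreamTime;   // times on a stream's timeline

// Simplified per-track bookkeeping, modeled on the patch's TrackMapEntry.
struct TrackCopyState {
  // End of the input ticks consumed so far; 0 until anything is consumed.
  TrackTicks mEndOfConsumedInputTicks = 0;
  // End of the previous interval that was unblocked for both streams, on the
  // input and output timelines; -1 means "no such interval yet".
  StreamTime mEndOfLastInputIntervalInInputStream = -1;
  StreamTime mEndOfLastInputIntervalInOutputStream = -1;
};

// Hypothetical helper: the -1 sentinels let later code distinguish "first
// unblocked interval for this track" from "continuation of the previous one"
// without carrying an extra boolean flag.
bool HasPreviousUnblockedInterval(const TrackCopyState& aState) {
  return aState.mEndOfLastInputIntervalInInputStream >= 0 &&
         aState.mEndOfLastInputIntervalInOutputStream >= 0;
}
```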
@@ -404,9 +407,7 @@ AudioNodeExternalInputStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
       // Ticks >= startTicks and < endTicks are in the interval
       StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
       TrackTicks startTicks = trackMap->mSamplesPassedToResampler + segment.GetDuration();
-#ifdef DEBUG
       StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
-#endif
       NS_ASSERTION(startTicks == TimeToTicksRoundUp(inputTrackRate, outputStart),
                    "Samples missing");
       TrackTicks endTicks = TimeToTicksRoundUp(inputTrackRate, outputEnd);
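The assertion above checks that the resampler's running sample count agrees with a round-up conversion of the output stream time. For readers unfamiliar with the tick conversions, here is a small self-contained sketch of round-up versus round-down conversion. It assumes a simple fixed-point time representation; the 20-bit fraction and the helper bodies are assumptions for illustration, not necessarily Gecko's actual MediaTime / TimeToTicks* implementation:

```cpp
#include <cassert>
#include <cstdint>

typedef int64_t TrackTicks;
typedef int64_t MediaTime;   // assumed fixed-point time: upper bits are whole units

// Illustrative fraction width; the real MediaTime layout may differ.
const int64_t TIME_FRAC_BITS = 20;

// ticks = ceil(time * rate / 2^TIME_FRAC_BITS)
TrackTicks TimeToTicksRoundUp(int64_t aRate, MediaTime aTime) {
  return (aTime * aRate + ((1LL << TIME_FRAC_BITS) - 1)) >> TIME_FRAC_BITS;
}

// ticks = floor(time * rate / 2^TIME_FRAC_BITS)
TrackTicks TimeToTicksRoundDown(int64_t aRate, MediaTime aTime) {
  return (aTime * aRate) >> TIME_FRAC_BITS;
}

int main() {
  // For a time that does not land exactly on a tick boundary, the two
  // conversions differ by one. Mixing them across consecutive intervals is
  // what can leave a one-tick gap or overlap, hence the "Samples missing"
  // assertion guarding the running tick count.
  MediaTime t = (1LL << TIME_FRAC_BITS) / 3;  // one third of a time unit
  assert(TimeToTicksRoundUp(48000, t) == TimeToTicksRoundDown(48000, t) + 1);
  return 0;
}
```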
@@ -416,15 +417,32 @@ AudioNodeExternalInputStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
         segment.AppendNullData(ticks);
       } else {
         // See comments in TrackUnionStream::CopyTrackData
-        // StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
+        StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
         StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
         TrackTicks inputTrackEndPoint =
           inputTrack.IsEnded() ? inputTrack.GetEnd() : TRACK_TICKS_MAX;
-        TrackTicks inputEndTicks = TimeToTicksRoundUp(inputTrackRate, inputEnd);
-        TrackTicks inputStartTicks = inputEndTicks - ticks;
-        segment.AppendSlice(*inputTrack.GetSegment(),
-                            std::min(inputTrackEndPoint, inputStartTicks),
-                            std::min(inputTrackEndPoint, inputEndTicks));
+
+        if (trackMap->mEndOfLastInputIntervalInInputStream != inputStart ||
+            trackMap->mEndOfLastInputIntervalInOutputStream != outputStart) {
+          // Start of a new series of intervals where neither stream is blocked.
+          trackMap->mEndOfConsumedInputTicks = TimeToTicksRoundDown(inputTrackRate, inputStart) - 1;
+        }
+        TrackTicks inputStartTicks = trackMap->mEndOfConsumedInputTicks;
+        TrackTicks inputEndTicks = inputStartTicks + ticks;
+        trackMap->mEndOfConsumedInputTicks = inputEndTicks;
+        trackMap->mEndOfLastInputIntervalInInputStream = inputEnd;
+        trackMap->mEndOfLastInputIntervalInOutputStream = outputEnd;
+
+        if (inputStartTicks < 0) {
+          // Data before the start of the track is just null.
+          segment.AppendNullData(-inputStartTicks);
+          inputStartTicks = 0;
+        }
+        if (inputEndTicks > inputStartTicks) {
+          segment.AppendSlice(*inputTrack.GetSegment(),
+                              std::min(inputTrackEndPoint, inputStartTicks),
+                              std::min(inputTrackEndPoint, inputEndTicks));
+        }
+        // Pad if we're looking past the end of the track
+        segment.AppendNullData(std::max<TrackTicks>(0, inputEndTicks - inputTrackEndPoint));
       }
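This hunk is the heart of the fix. The old code re-derived inputStartTicks from the rounded-up end of each interval (inputEndTicks - ticks), so two adjacent unblocked intervals could skip or re-read an input tick whenever the rounding of their boundaries disagreed. The new code instead remembers how far into the input it has already consumed (mEndOfConsumedInputTicks) and keeps consuming contiguously as long as the current interval directly continues the previous one on both timelines. A self-contained sketch of that bookkeeping, with segment operations reduced to returning the tick range that would be sliced; the CopyIntervalState struct and ConsumeInterval function are hypothetical stand-ins for the real TrackMapEntry logic, not Gecko APIs:

```cpp
#include <cassert>
#include <cstdint>

typedef int64_t TrackTicks;
typedef int64_t StreamTime;

// Hypothetical, simplified stand-in for the patch's per-track bookkeeping.
struct CopyIntervalState {
  TrackTicks mEndOfConsumedInputTicks = 0;
  StreamTime mEndOfLastInputIntervalInInputStream = -1;
  StreamTime mEndOfLastInputIntervalInOutputStream = -1;
};

// Half-open tick range [mStart, mEnd) that a real implementation would slice
// out of the input track's segment.
struct TickRange { TrackTicks mStart, mEnd; };

// One unblocked interval: consume exactly aTicks ticks of input, continuing
// from where the previous interval stopped if the current interval directly
// continues it on both the input and output timelines.
TickRange ConsumeInterval(CopyIntervalState* aState,
                          StreamTime aInputStart, StreamTime aInputEnd,
                          StreamTime aOutputStart, StreamTime aOutputEnd,
                          TrackTicks aTicks,
                          TrackTicks aRoundedDownInputStartTicks)
{
  if (aState->mEndOfLastInputIntervalInInputStream != aInputStart ||
      aState->mEndOfLastInputIntervalInOutputStream != aOutputStart) {
    // Start of a new series of unblocked intervals: re-derive the consumed
    // position from the interval's rounded-down start (the "- 1" mirrors the
    // patch; a negative start is rendered as null data in the real code).
    aState->mEndOfConsumedInputTicks = aRoundedDownInputStartTicks - 1;
  }
  TrackTicks start = aState->mEndOfConsumedInputTicks;
  TrackTicks end = start + aTicks;
  aState->mEndOfConsumedInputTicks = end;
  aState->mEndOfLastInputIntervalInInputStream = aInputEnd;
  aState->mEndOfLastInputIntervalInOutputStream = aOutputEnd;
  return TickRange{ start, end };
}

int main() {
  CopyIntervalState state;
  // Two back-to-back unblocked intervals; the stream-time values are opaque
  // illustrative numbers, not derived from a real tick rate.
  TickRange a = ConsumeInterval(&state, 0, 10, 0, 10, /*ticks*/ 480,
                                /*rounded-down input start*/ 0);
  TickRange b = ConsumeInterval(&state, 10, 25, 10, 25, /*ticks*/ 720,
                                /*ignored: run continues*/ 480);
  // The second interval continues the first on both timelines, so the
  // consumed input ranges are contiguous: no tick is skipped or read twice.
  assert(a.mEnd == b.mStart);
  assert(b.mEnd - a.mStart == 480 + 720);
  return 0;
}
```

Both asserts pass because the second interval's range starts exactly where the first one ended; in the real patch the initial negative start produced by the `- 1` is padded with null data before the track's first tick, and data past the end of an ended track is likewise padded with null.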
@@ -57,6 +57,17 @@ private:
                               AudioSampleFormat aFormat,
                               float aVolume);
 
+    // mEndOfConsumedInputTicks is the end of the input ticks that we've consumed.
+    // 0 if we haven't consumed any yet.
+    TrackTicks mEndOfConsumedInputTicks;
+    // mEndOfLastInputIntervalInInputStream is the timestamp for the end of the
+    // previous interval which was unblocked for both the input and output
+    // stream, in the input stream's timeline, or -1 if there wasn't one.
+    StreamTime mEndOfLastInputIntervalInInputStream;
+    // mEndOfLastInputIntervalInOutputStream is the timestamp for the end of the
+    // previous interval which was unblocked for both the input and output
+    // stream, in the output stream's timeline, or -1 if there wasn't one.
+    StreamTime mEndOfLastInputIntervalInOutputStream;
     /**
      * Number of samples passed to the resampler so far.
      */
@@ -11,13 +11,22 @@
 <pre id="test">
 <script class="testbody" type="text/javascript">
 
+function createBuffer(context, delay) {
+  var buffer = context.createBuffer(2, 2048, context.sampleRate);
+  for (var i = 0; i < 2048 - delay; ++i) {
+    buffer.getChannelData(0)[i + delay] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+    buffer.getChannelData(1)[i + delay] = buffer.getChannelData(0)[i + delay];
+  }
+  return buffer;
+}
+
 var gTest = {
   length: 2048,
   skipOfflineContextTests: true,
   createGraph: function(context) {
     var sourceGraph = new AudioContext();
     var source = sourceGraph.createBufferSource();
-    source.buffer = this.buffer;
+    source.buffer = createBuffer(context, 0);
     var dest = sourceGraph.createMediaStreamDestination();
     source.connect(dest);
     source.start(0);
@@ -26,13 +35,7 @@ var gTest = {
     return mediaStreamSource;
   },
   createExpectedBuffers: function(context) {
-    var buffer = context.createBuffer(2, 2048, context.sampleRate);
-    for (var i = 0; i < 2048; ++i) {
-      buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
-      buffer.getChannelData(1)[i] = buffer.getChannelData(0)[i];
-    }
-    this.buffer = buffer;
-    return buffer;
+    return createBuffer(context, 1);
   },
 };
 