本文整理汇总了C++中MediaStream::GraphTimeToStreamTime方法的典型用法代码示例。如果您正苦于以下问题:C++ MediaStream::GraphTimeToStreamTime方法的具体用法?C++ MediaStream::GraphTimeToStreamTime怎么用?C++ MediaStream::GraphTimeToStreamTime使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类MediaStream
的用法示例。
在下文中一共展示了MediaStream::GraphTimeToStreamTime方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: CopyTrackData
// Copies the data of one mapped input track into its corresponding output
// track, covering the graph-time window [aFrom, aTo).
//
// aInputTrack          - source track being unioned into this stream.
// aMapIndex            - index into mTrackMap selecting the input->output
//                        track mapping to process.
// aFrom, aTo           - graph-time interval to copy.
// aOutputTrackFinished - out-param; set to true once the input track has
//                        ended and all of its data falls within this window.
void TrackUnionStream::CopyTrackData(StreamBuffer::Track* aInputTrack,
                                     uint32_t aMapIndex, GraphTime aFrom, GraphTime aTo,
                                     bool* aOutputTrackFinished)
{
  TrackMapEntry* map = &mTrackMap[aMapIndex];
  StreamBuffer::Track* outputTrack = mBuffer.FindTrack(map->mOutputTrackID);
  MOZ_ASSERT(outputTrack && !outputTrack->IsEnded(), "Can't copy to ended track");
  MediaSegment* segment = map->mSegment;
  MediaStream* source = map->mInputPort->GetSource();
  GraphTime next;
  *aOutputTrackFinished = false;
  // Walk the window one input interval at a time; `next` is advanced to the
  // end of each processed interval.
  for (GraphTime t = aFrom; t < aTo; t = next) {
    MediaInputPort::InputInterval interval = map->mInputPort->GetNextInputInterval(t);
    interval.mEnd = std::min(interval.mEnd, aTo);
    // End of this interval expressed on the source stream's own timeline.
    StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
    StreamTime inputTrackEndPoint = STREAM_TIME_MAX;
    if (aInputTrack->IsEnded() &&
        aInputTrack->GetEnd() <= inputEnd) {
      // The input track ends at or before this interval's end: clamp all
      // reads to its end and flag the output track as finished.
      // NOTE(review): this runs *before* the empty-interval break below, so
      // a finished track is reported even when nothing is copied this pass —
      // preserve this ordering.
      inputTrackEndPoint = aInputTrack->GetEnd();
      *aOutputTrackFinished = true;
    }
    if (interval.mStart >= interval.mEnd) {
      break;
    }
    StreamTime ticks = interval.mEnd - interval.mStart;
    next = interval.mEnd;
    StreamTime outputStart = outputTrack->GetEnd();
    if (interval.mInputIsBlocked) {
      // Maybe the input track ended?
      segment->AppendNullData(ticks);
      STREAM_LOG(PR_LOG_DEBUG+1, ("TrackUnionStream %p appending %lld ticks of null data to track %d",
                                  this, (long long)ticks, outputTrack->GetID()));
    } else if (InMutedCycle()) {
      // Inside a muted cycle no real data can flow; substitute silence.
      segment->AppendNullData(ticks);
    } else {
      MOZ_ASSERT(outputTrack->GetEnd() == GraphTimeToStreamTime(interval.mStart),
                 "Samples missing");
      StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
      // Copy the slice, never reading past the (possibly clamped) end of
      // the input track.
      segment->AppendSlice(*aInputTrack->GetSegment(),
                           std::min(inputTrackEndPoint, inputStart),
                           std::min(inputTrackEndPoint, inputEnd));
    }
    ApplyTrackDisabling(outputTrack->GetID(), segment);
    // Notify listeners of the queued data before it is moved onto the
    // output track's segment.
    for (uint32_t j = 0; j < mListeners.Length(); ++j) {
      MediaStreamListener* l = mListeners[j];
      l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(),
                                  outputStart, 0, *segment);
    }
    outputTrack->GetSegment()->AppendFrom(segment);
  }
}
示例2: AdvanceOutputSegment
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // Per spec, this node type exposes exactly one output.
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // GC can tear down our input stream before this stream; handle that by
  // emitting a null block (also for the disabled / pass-through cases).
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);
  MediaInputPort* inputPort = mInputs[0];
  MediaStream* source = inputPort->GetSource();

  nsAutoTArray<AudioSegment,1> perTrackSegments;
  uint32_t widestChannelCount = 0;

  // Gather audio from every non-null audio track of the source stream.
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    const AudioSegment& inputSegment =
      *static_cast<AudioSegment*>(inputTrack.GetSegment());
    if (inputSegment.IsNull()) {
      continue;
    }

    AudioSegment& gathered = *perTrackSegments.AppendElement();

    // Walk the processing window one input interval at a time.
    GraphTime cursor = aFrom;
    while (cursor < aTo) {
      MediaInputPort::InputInterval interval =
        inputPort->GetNextInputInterval(cursor);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd) {
        break;
      }
      cursor = interval.mEnd;

      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      StreamTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        // A blocked input contributes silence.
        gathered.AppendNullData(ticks);
      } else {
        StreamTime inputStart =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mStart));
        StreamTime inputEnd =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mEnd));
        gathered.AppendSlice(inputSegment, inputStart, inputEnd);
        // Zero-fill any portion that lies past the end of the track.
        gathered.AppendNullData(ticks - (inputEnd - inputStart));
      }
    }

    // Track the widest channel configuration seen across all chunks.
    for (AudioSegment::ChunkIterator iter(gathered); !iter.IsEnded();
         iter.Next()) {
      widestChannelCount =
        GetAudioChannelsSuperset(widestChannelCount, iter->ChannelCount());
    }
  }

  // Mix every per-track segment into the single output chunk.
  uint32_t chunksMixed = 0;
  if (widestChannelCount) {
    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
    for (uint32_t i = 0; i < perTrackSegments.Length(); ++i) {
      AudioChunk tmpChunk;
      ConvertSegmentToAudioBlock(&perTrackSegments[i], &tmpChunk,
                                 widestChannelCount);
      if (!tmpChunk.IsNull()) {
        if (chunksMixed == 0) {
          AllocateAudioBlock(widestChannelCount, &mLastChunks[0]);
        }
        AccumulateInputChunk(chunksMixed, tmpChunk, &mLastChunks[0],
                             &downmixBuffer);
        chunksMixed++;
      }
    }
  }
  if (chunksMixed == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }

  // Using AudioNodeStream's AdvanceOutputSegment to push the media stream
  // graph along, with null data if nothing was mixed.
  AdvanceOutputSegment();
}
示例3: GraphTimeToStreamTime
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // Per spec, this node type exposes exactly one output.
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // GC can tear down our input stream before this stream; handle that by
  // emitting a null block (also for the disabled / pass-through cases).
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);
  MediaInputPort* inputPort = mInputs[0];
  MediaStream* source = inputPort->GetSource();

  AutoTArray<AudioSegment,1> collectedSegments;
  uint32_t widestChannelCount = 0;

  for (StreamTracks::TrackIter tracks(source->mTracks);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamTracks::Track& inputTrack = *tracks;
    // Only consider tracks the port actually forwards.
    if (!inputPort->PassTrackThrough(tracks->GetID())) {
      continue;
    }
    if (inputTrack.GetSegment()->GetType() == MediaSegment::VIDEO) {
      MOZ_ASSERT(false, "AudioNodeExternalInputStream shouldn't have video tracks");
      continue;
    }
    const AudioSegment& inputSegment =
      *static_cast<AudioSegment*>(inputTrack.GetSegment());
    if (inputSegment.IsNull()) {
      continue;
    }

    AudioSegment& collected = *collectedSegments.AppendElement();

    // Walk the processing window one input interval at a time.
    GraphTime cursor = aFrom;
    while (cursor < aTo) {
      MediaInputPort::InputInterval interval =
        inputPort->GetNextInputInterval(cursor);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd) {
        break;
      }
      cursor = interval.mEnd;

      // We know this stream does not block during the processing interval:
      // we're not finished, we don't underrun, and we're not suspended.
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      StreamTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        collected.AppendNullData(ticks);
      } else {
        // The input stream is unblocked in this interval, so plain
        // GraphTimeToStreamTime suffices (no need for the blocking-aware
        // variant).
        StreamTime inputStart =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mStart));
        StreamTime inputEnd =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mEnd));
        collected.AppendSlice(inputSegment, inputStart, inputEnd);
        // Zero-fill any portion that lies past the end of the track.
        collected.AppendNullData(ticks - (inputEnd - inputStart));
      }
    }

    // Track the widest channel configuration seen across all chunks.
    for (AudioSegment::ChunkIterator iter(collected); !iter.IsEnded();
         iter.Next()) {
      widestChannelCount =
        GetAudioChannelsSuperset(widestChannelCount, iter->ChannelCount());
    }
  }

  // Mix every collected segment into the single output chunk.
  uint32_t chunksMixed = 0;
  if (widestChannelCount) {
    DownmixBufferType downmixBuffer;
    ASSERT_ALIGNED16(downmixBuffer.Elements());
    for (uint32_t i = 0; i < collectedSegments.Length(); ++i) {
      AudioBlock tmpChunk;
      ConvertSegmentToAudioBlock(&collectedSegments[i], &tmpChunk,
                                 widestChannelCount);
      if (!tmpChunk.IsNull()) {
        if (chunksMixed == 0) {
          mLastChunks[0].AllocateChannels(widestChannelCount);
        }
        AccumulateInputChunk(chunksMixed, tmpChunk, &mLastChunks[0],
                             &downmixBuffer);
        chunksMixed++;
      }
    }
  }
  if (chunksMixed == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
}
示例4: AdvanceOutputSegment
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
uint32_t aFlags)
{
// According to spec, number of outputs is always 1.
mLastChunks.SetLength(1);
// GC stuff can result in our input stream being destroyed before this stream.
// Handle that.
if (mInputs.IsEmpty()) {
mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
AdvanceOutputSegment();
return;
}
MOZ_ASSERT(mInputs.Length() == 1);
MediaStream* source = mInputs[0]->GetSource();
nsAutoTArray<AudioSegment,1> audioSegments;
nsAutoTArray<bool,1> trackMapEntriesUsed;
uint32_t inputChannels = 0;
for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
!tracks.IsEnded(); tracks.Next()) {
const StreamBuffer::Track& inputTrack = *tracks;
// Create a TrackMapEntry if necessary.
size_t trackMapIndex = GetTrackMapEntry(inputTrack, aFrom);
// Maybe there's nothing in this track yet. If so, ignore it. (While the
// track is only playing silence, we may not be able to determine the
// correct number of channels to start resampling.)
if (trackMapIndex == nsTArray<TrackMapEntry>::NoIndex) {
continue;
}
while (trackMapEntriesUsed.Length() <= trackMapIndex) {
trackMapEntriesUsed.AppendElement(false);
}
trackMapEntriesUsed[trackMapIndex] = true;
TrackMapEntry* trackMap = &mTrackMap[trackMapIndex];
AudioSegment segment;
GraphTime next;
TrackRate inputTrackRate = inputTrack.GetRate();
for (GraphTime t = aFrom; t < aTo; t = next) {
MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
interval.mEnd = std::min(interval.mEnd, aTo);
if (interval.mStart >= interval.mEnd)
break;
next = interval.mEnd;
// Ticks >= startTicks and < endTicks are in the interval
StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
TrackTicks startTicks = trackMap->mSamplesPassedToResampler + segment.GetDuration();
StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
NS_ASSERTION(startTicks == TimeToTicksRoundUp(inputTrackRate, outputStart),
"Samples missing");
TrackTicks endTicks = TimeToTicksRoundUp(inputTrackRate, outputEnd);
TrackTicks ticks = endTicks - startTicks;
if (interval.mInputIsBlocked) {
segment.AppendNullData(ticks);
} else {
// See comments in TrackUnionStream::CopyTrackData
StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
TrackTicks inputTrackEndPoint =
inputTrack.IsEnded() ? inputTrack.GetEnd() : TRACK_TICKS_MAX;
if (trackMap->mEndOfLastInputIntervalInInputStream != inputStart ||
trackMap->mEndOfLastInputIntervalInOutputStream != outputStart) {
// Start of a new series of intervals where neither stream is blocked.
trackMap->mEndOfConsumedInputTicks = TimeToTicksRoundDown(inputTrackRate, inputStart) - 1;
}
TrackTicks inputStartTicks = trackMap->mEndOfConsumedInputTicks;
TrackTicks inputEndTicks = inputStartTicks + ticks;
trackMap->mEndOfConsumedInputTicks = inputEndTicks;
trackMap->mEndOfLastInputIntervalInInputStream = inputEnd;
trackMap->mEndOfLastInputIntervalInOutputStream = outputEnd;
if (inputStartTicks < 0) {
// Data before the start of the track is just null.
segment.AppendNullData(-inputStartTicks);
inputStartTicks = 0;
}
if (inputEndTicks > inputStartTicks) {
segment.AppendSlice(*inputTrack.GetSegment(),
std::min(inputTrackEndPoint, inputStartTicks),
std::min(inputTrackEndPoint, inputEndTicks));
}
// Pad if we're looking past the end of the track
segment.AppendNullData(ticks - segment.GetDuration());
}
}
trackMap->mSamplesPassedToResampler += segment.GetDuration();
trackMap->ResampleInputData(&segment);
if (trackMap->mResampledData.GetDuration() < mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE) {
// We don't have enough data. Delay it.
trackMap->mResampledData.InsertNullDataAtStart(
mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE - trackMap->mResampledData.GetDuration());
//.........这里部分代码省略.........