This page collects typical usage examples of the C++ MediaStream class. If you are wondering what the C++ MediaStream class is for, how to use MediaStream, or where to find MediaStream examples, the curated class code samples below may help.
The following presents 15 code examples of the MediaStream class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: Destination
double
AudioContext::CurrentTime() const
{
  MediaStream* stream = Destination()->Stream();
  return stream->StreamTimeToSeconds(stream->GetCurrentTime());
}
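The conversion in this example rests on a single idea: stream time is a tick count at the graph's sample rate, and StreamTimeToSeconds divides that rate out. A minimal sketch of the relationship, with illustrative names (ticks, graphRate) that are not taken from the example:

#include <cstdint>

// Hedged sketch: the seconds value CurrentTime() returns is ticks / rate,
// e.g. 48000 ticks at a 48 kHz graph rate is 1.0 second.
double StreamTimeToSecondsSketch(int64_t ticks, int32_t graphRate)
{
  return static_cast<double>(ticks) / graphRate;
}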
Example 2: MOZ_ASSERT
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // GC stuff can result in our input stream being destroyed before this
  // stream. Handle that.
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  nsAutoTArray<AudioSegment,1> audioSegments;
  uint32_t inputChannels = 0;
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    const AudioSegment& inputSegment =
      *static_cast<AudioSegment*>(inputTrack.GetSegment());
    if (inputSegment.IsNull()) {
      continue;
    }

    AudioSegment& segment = *audioSegments.AppendElement();
    GraphTime next;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval =
        mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      StreamTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        StreamTime inputStart =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mStart));
        StreamTime inputEnd =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mEnd));

        segment.AppendSlice(inputSegment, inputStart, inputEnd);
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - (inputEnd - inputStart));
      }
    }

    for (AudioSegment::ChunkIterator iter(segment);
         !iter.IsEnded(); iter.Next()) {
      inputChannels =
        GetAudioChannelsSuperset(inputChannels, iter->ChannelCount());
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioChunk tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk, inputChannels);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          AllocateAudioBlock(inputChannels, &mLastChunks[0]);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0],
                             &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }

  // Using AudioNodeStream's AdvanceOutputSegment to push the media stream
  // graph along with null data.
  AdvanceOutputSegment();
}
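The channel bookkeeping above relies on GetAudioChannelsSuperset to choose an output channel count wide enough for every input chunk. For the usual layouts this reduces to taking the larger of the two counts; a standalone sketch under that assumption:

#include <algorithm>
#include <cstdint>

// Hedged sketch of the superset rule: the output needs at least as many
// channels as the widest input seen so far, so take the maximum.
uint32_t AudioChannelsSupersetSketch(uint32_t a, uint32_t b)
{
  return std::max(a, b);
}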
Example 3: CopyTrackData
void TrackUnionStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
{
  if (IsFinishedOnGraphThread()) {
    return;
  }
  nsAutoTArray<bool,8> mappedTracksFinished;
  nsAutoTArray<bool,8> mappedTracksWithMatchingInputTracks;
  for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
    mappedTracksFinished.AppendElement(true);
    mappedTracksWithMatchingInputTracks.AppendElement(false);
  }
  bool allFinished = !mInputs.IsEmpty();
  bool allHaveCurrentData = !mInputs.IsEmpty();
  for (uint32_t i = 0; i < mInputs.Length(); ++i) {
    MediaStream* stream = mInputs[i]->GetSource();
    if (!stream->IsFinishedOnGraphThread()) {
      // XXX we really should check whether 'stream' has finished within time
      // aTo, not just that it's finishing when all its queued data eventually
      // runs out.
      allFinished = false;
    }
    if (!stream->HasCurrentData()) {
      allHaveCurrentData = false;
    }
    bool trackAdded = false;
    for (StreamBuffer::TrackIter tracks(stream->GetStreamBuffer());
         !tracks.IsEnded(); tracks.Next()) {
      bool found = false;
      for (uint32_t j = 0; j < mTrackMap.Length(); ++j) {
        TrackMapEntry* map = &mTrackMap[j];
        if (map->mInputPort == mInputs[i] &&
            map->mInputTrackID == tracks->GetID()) {
          bool trackFinished;
          StreamBuffer::Track* outputTrack = mBuffer.FindTrack(map->mOutputTrackID);
          if (!outputTrack || outputTrack->IsEnded()) {
            trackFinished = true;
          } else {
            CopyTrackData(tracks.get(), j, aFrom, aTo, &trackFinished);
          }
          mappedTracksFinished[j] = trackFinished;
          mappedTracksWithMatchingInputTracks[j] = true;
          found = true;
          break;
        }
      }
      if (!found && (!mFilterCallback || mFilterCallback(tracks.get()))) {
        bool trackFinished = false;
        trackAdded = true;
        uint32_t mapIndex = AddTrack(mInputs[i], tracks.get(), aFrom);
        CopyTrackData(tracks.get(), mapIndex, aFrom, aTo, &trackFinished);
        mappedTracksFinished.AppendElement(trackFinished);
        mappedTracksWithMatchingInputTracks.AppendElement(true);
      }
    }
    if (trackAdded) {
      for (MediaStreamListener* l : mListeners) {
        l->NotifyFinishedTrackCreation(Graph());
      }
    }
  }
  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
    if (mappedTracksFinished[i]) {
      EndTrack(i);
    } else {
      allFinished = false;
    }
    if (!mappedTracksWithMatchingInputTracks[i]) {
      mTrackMap.RemoveElementAt(i);
    }
  }
  if (allFinished && mAutofinish && (aFlags & ALLOW_FINISH)) {
    // All streams have finished and won't add any more tracks, and
    // all our tracks have actually finished and been removed from our map,
    // so we're finished now.
    FinishOnGraphThread();
  } else {
    mBuffer.AdvanceKnownTracksTime(GraphTimeToStreamTime(aTo));
  }
  if (allHaveCurrentData) {
    // We can make progress if we're not blocked
    mHasCurrentData = true;
  }
}
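A detail worth noting in the cleanup pass: mTrackMap is walked from the back so that RemoveElementAt never shifts entries still waiting to be visited. The same erase-while-iterating pattern in a standalone sketch, with std::vector standing in for the nsTArray used above:

#include <cstdint>
#include <vector>

// Hedged sketch: removing element i only moves elements after i, so a
// back-to-front walk can erase safely without skipping entries.
void PruneSketch(std::vector<int>& entries, const std::vector<bool>& keep)
{
  for (int32_t i = static_cast<int32_t>(entries.size()) - 1; i >= 0; --i) {
    if (!keep[i]) {
      entries.erase(entries.begin() + i);
    }
  }
}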
Example 4: GetAudioChannelsSuperset
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread() ||
        a->IsAudioParamStream()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  switch (mChannelCountMode) {
  case ChannelCountMode::Explicit:
    // Disregard the output channel count that we've calculated, and just use
    // mNumberOfInputChannels.
    outputChannelCount = mNumberOfInputChannels;
    break;
  case ChannelCountMode::Clamped_max:
    // Clamp the computed output channel count to mNumberOfInputChannels.
    outputChannelCount = std::min(outputChannelCount, mNumberOfInputChannels);
    break;
  case ChannelCountMode::Max:
    // Nothing to do here, just shut up the compiler warning.
    break;
  }

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
        AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
        NS_ASSERTION(outputChannelCount == channels.Length(),
                     "We called GetAudioChannelsSuperset to avoid this");
      } else {
        // Fill up the remaining channels with zeros
        for (uint32_t j = channels.Length(); j < outputChannelCount; ++j) {
          channels.AppendElement(silenceChannel);
        }
      }
    } else if (channels.Length() > outputChannelCount) {
      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
        nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
        outputChannels.SetLength(outputChannelCount);
        downmixBuffer.SetLength(outputChannelCount * WEBAUDIO_BLOCK_SIZE);
        for (uint32_t j = 0; j < outputChannelCount; ++j) {
          outputChannels[j] = &downmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
        }
        AudioChannelsDownMix(channels, outputChannels.Elements(),
                             outputChannelCount, WEBAUDIO_BLOCK_SIZE);
        channels.SetLength(outputChannelCount);
        for (uint32_t j = 0; j < channels.Length(); ++j) {
          channels[j] = outputChannels[j];
        }
      } else {
        // Drop the remaining channels
        channels.RemoveElementsAt(outputChannelCount,
                                  channels.Length() - outputChannelCount);
      }
    }
//......... some code omitted here .........
Example 5: EnsureTrack
void
AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                 uint32_t aFlags)
{
  if (!mStarted) {
    return;
  }

  uint32_t inputCount = mInputs.Length();
  StreamTracks::Track* track = EnsureTrack(mTrackId);
  // Notify the DOM everything is in order.
  if (!mTrackCreated) {
    for (uint32_t i = 0; i < mListeners.Length(); i++) {
      MediaStreamListener* l = mListeners[i];
      AudioSegment tmp;
      l->NotifyQueuedTrackChanges(
        Graph(), mTrackId, 0, TrackEventCommand::TRACK_EVENT_CREATED, tmp);
      l->NotifyFinishedTrackCreation(Graph());
    }
    mTrackCreated = true;
  }

  if (IsFinishedOnGraphThread()) {
    return;
  }

  // If the captured stream is connected back to an object on the page (be it
  // an HTMLMediaElement with a stream as source, or an AudioContext), a cycle
  // situation can occur. This can work if it's an AudioContext with at least
  // one DelayNode, but the MSG will mute the whole cycle otherwise.
  if (InMutedCycle() || inputCount == 0) {
    track->Get<AudioSegment>()->AppendNullData(aTo - aFrom);
  } else {
    // We mix down all the tracks of all inputs, to a stereo track. Everything
    // is {up,down}-mixed to stereo.
    mMixer.StartMixing();
    AudioSegment output;
    for (uint32_t i = 0; i < inputCount; i++) {
      MediaStream* s = mInputs[i]->GetSource();
      // Iterate with a for loop so that `continue` still advances the
      // iterator (a bare `continue` in a while loop would spin forever).
      for (StreamTracks::TrackIter tracks(s->GetStreamTracks(), MediaSegment::AUDIO);
           !tracks.IsEnded(); tracks.Next()) {
        AudioSegment* inputSegment = tracks->Get<AudioSegment>();
        StreamTime inputStart = s->GraphTimeToStreamTimeWithBlocking(aFrom);
        StreamTime inputEnd = s->GraphTimeToStreamTimeWithBlocking(aTo);
        if (tracks->IsEnded() && inputSegment->GetDuration() <= inputEnd) {
          // If the input track has ended and we have consumed all its data,
          // it can be ignored.
          continue;
        }
        AudioSegment toMix;
        toMix.AppendSlice(*inputSegment, inputStart, inputEnd);
        // Care for streams blocked in the [aFrom, aTo] range.
        if (inputEnd - inputStart < aTo - aFrom) {
          toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart));
        }
        toMix.Mix(mMixer, MONO, Graph()->GraphRate());
      }
    }
    // This calls MixerCallback below.
    mMixer.FinishMixing();
  }

  // Regardless of the status of the input tracks, we go forward.
  mTracks.AdvanceKnownTracksTime(GraphTimeToStreamTimeWithBlocking(aTo));
}
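The AppendNullData call above enforces a simple invariant: every input must contribute exactly aTo - aFrom ticks per pass, so a stream that was blocked for part of the interval is topped up with silence. That padding rule as a standalone sketch (names are illustrative):

#include <algorithm>
#include <cstdint>

// Hedged sketch: if the input advanced fewer ticks than the graph did
// (it was blocked part of the time), the shortfall becomes silence.
int64_t SilencePaddingTicks(int64_t inputStart, int64_t inputEnd,
                            int64_t graphFrom, int64_t graphTo)
{
  int64_t consumed = inputEnd - inputStart; // ticks the input provided
  int64_t needed = graphTo - graphFrom;     // ticks the graph advanced
  return std::max<int64_t>(0, needed - consumed);
}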
Example 6: MOZ_ASSERT
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // GC stuff can result in our input stream being destroyed before this
  // stream. Handle that.
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  AutoTArray<AudioSegment,1> audioSegments;
  uint32_t inputChannels = 0;
  for (StreamTracks::TrackIter tracks(source->mTracks);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamTracks::Track& inputTrack = *tracks;
    if (!mInputs[0]->PassTrackThrough(tracks->GetID())) {
      continue;
    }

    if (inputTrack.GetSegment()->GetType() == MediaSegment::VIDEO) {
      MOZ_ASSERT(false, "AudioNodeExternalInputStream shouldn't have video tracks");
      continue;
    }

    const AudioSegment& inputSegment =
      *static_cast<AudioSegment*>(inputTrack.GetSegment());
    if (inputSegment.IsNull()) {
      continue;
    }

    AudioSegment& segment = *audioSegments.AppendElement();
    GraphTime next;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval =
        mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      // We know this stream does not block during the processing interval ---
      // we're not finished, we don't underrun, and we're not suspended.
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      StreamTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // The input stream is not blocked in this interval, so no need to
        // call GraphTimeToStreamTimeWithBlocking.
        StreamTime inputStart =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mStart));
        StreamTime inputEnd =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mEnd));

        segment.AppendSlice(inputSegment, inputStart, inputEnd);
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - (inputEnd - inputStart));
      }
    }

    for (AudioSegment::ChunkIterator iter(segment);
         !iter.IsEnded(); iter.Next()) {
      inputChannels =
        GetAudioChannelsSuperset(inputChannels, iter->ChannelCount());
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    DownmixBufferType downmixBuffer;
    ASSERT_ALIGNED16(downmixBuffer.Elements());
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioBlock tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk, inputChannels);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          mLastChunks[0].AllocateChannels(inputChannels);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0],
                             &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
}
Example 7: ProcessBusMessageWrapper
int MediaStream::ProcessBusMessageWrapper(GstBus* bus, GstMessage* message,
                                          gpointer user_data)
{
  MediaStream* instance = (MediaStream*)user_data;
  return instance->ProcessBusMessage(bus, message, user_data);
}
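This is the standard GStreamer trampoline: a static wrapper receives the gpointer user_data, casts it back to the C++ instance, and forwards the call. A hedged sketch of how such a wrapper is typically registered; the function names and the gst_bus_add_watch wiring below are assumptions, not taken from this example (note that GstBusFunc formally returns gboolean, which the int return above matches in practice):

#include <gst/gst.h>

// Hypothetical trampoline + registration (names assumed, not from the source).
static gboolean BusWatchSketch(GstBus* bus, GstMessage* msg, gpointer user_data)
{
  // A real implementation would cast user_data back to the instance and
  // forward, exactly as ProcessBusMessageWrapper does above.
  (void)bus; (void)msg; (void)user_data;
  return TRUE; // returning TRUE keeps the watch installed
}

static void AttachBusWatchSketch(GstElement* pipeline, gpointer self)
{
  GstBus* bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
  gst_bus_add_watch(bus, BusWatchSketch, self); // self becomes user_data
  gst_object_unref(bus);
}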
Example 8: TRACE_AUDIO_CALLBACK_COMMENT
void TrackUnionStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                    uint32_t aFlags) {
  TRACE_AUDIO_CALLBACK_COMMENT("TrackUnionStream %p", this);
  if (IsFinishedOnGraphThread()) {
    return;
  }
  AutoTArray<bool, 8> mappedTracksFinished;
  AutoTArray<bool, 8> mappedTracksWithMatchingInputTracks;
  for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
    mappedTracksFinished.AppendElement(true);
    mappedTracksWithMatchingInputTracks.AppendElement(false);
  }

  AutoTArray<MediaInputPort*, 32> inputs(mInputs);
  inputs.AppendElements(mSuspendedInputs);

  bool allFinished = !inputs.IsEmpty();
  bool allHaveCurrentData = !inputs.IsEmpty();
  for (uint32_t i = 0; i < inputs.Length(); ++i) {
    MediaStream* stream = inputs[i]->GetSource();
    if (!stream->IsFinishedOnGraphThread()) {
      // XXX we really should check whether 'stream' has finished within time
      // aTo, not just that it's finishing when all its queued data eventually
      // runs out.
      allFinished = false;
    }
    if (!stream->HasCurrentData()) {
      allHaveCurrentData = false;
    }
    for (StreamTracks::TrackIter tracks(stream->GetStreamTracks());
         !tracks.IsEnded(); tracks.Next()) {
      bool found = false;
      for (uint32_t j = 0; j < mTrackMap.Length(); ++j) {
        TrackMapEntry* map = &mTrackMap[j];
        if (map->mInputPort == inputs[i] &&
            map->mInputTrackID == tracks->GetID()) {
          bool trackFinished = false;
          StreamTracks::Track* outputTrack =
              mTracks.FindTrack(map->mOutputTrackID);
          found = true;
          if (!outputTrack || outputTrack->IsEnded() ||
              !inputs[i]->PassTrackThrough(tracks->GetID())) {
            trackFinished = true;
          } else {
            CopyTrackData(tracks.get(), j, aFrom, aTo, &trackFinished);
          }
          mappedTracksFinished[j] = trackFinished;
          mappedTracksWithMatchingInputTracks[j] = true;
          break;
        }
      }
      if (!found && inputs[i]->AllowCreationOf(tracks->GetID())) {
        bool trackFinished = false;
        uint32_t mapIndex = AddTrack(inputs[i], tracks.get(), aFrom);
        CopyTrackData(tracks.get(), mapIndex, aFrom, aTo, &trackFinished);
        mappedTracksFinished.AppendElement(trackFinished);
        mappedTracksWithMatchingInputTracks.AppendElement(true);
      }
    }
  }

  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
    if (mappedTracksFinished[i]) {
      EndTrack(i);
    } else {
      allFinished = false;
    }
    if (!mappedTracksWithMatchingInputTracks[i]) {
      for (auto listener : mTrackMap[i].mOwnedDirectListeners) {
        // Remove listeners while the entry still exists.
        RemoveDirectTrackListenerImpl(listener, mTrackMap[i].mOutputTrackID);
      }
      mTrackMap.RemoveElementAt(i);
    }
  }
  if (allFinished && mAutofinish && (aFlags & ALLOW_FINISH)) {
    // All streams have finished and won't add any more tracks, and
    // all our tracks have actually finished and been removed from our map,
    // so we're finished now.
    FinishOnGraphThread();
  }
  if (allHaveCurrentData) {
    // We can make progress if we're not blocked
    mHasCurrentData = true;
  }
}
Example 9: STREAM_LOG
uint32_t TrackUnionStream::AddTrack(MediaInputPort* aPort,
                                    StreamTracks::Track* aTrack,
                                    GraphTime aFrom)
{
  STREAM_LOG(LogLevel::Verbose, ("TrackUnionStream %p adding track %d for "
                                 "input stream %p track %d, desired id %d",
                                 this, aTrack->GetID(), aPort->GetSource(),
                                 aTrack->GetID(),
                                 aPort->GetDestinationTrackId()));

  TrackID id;
  if (IsTrackIDExplicit(id = aPort->GetDestinationTrackId())) {
    MOZ_ASSERT(id >= mNextAvailableTrackID &&
               mUsedTracks.BinaryIndexOf(id) == mUsedTracks.NoIndex,
               "Desired destination id taken. Only provide a destination ID "
               "if you can assure its availability, or we may not be able "
               "to bind to the correct DOM-side track.");
#ifdef DEBUG
    for (size_t i = 0; mInputs[i] != aPort; ++i) {
      MOZ_ASSERT(mInputs[i]->GetSourceTrackId() != TRACK_ANY,
                 "You are adding a MediaInputPort with a track mapping "
                 "while there already exist generic MediaInputPorts for this "
                 "destination stream. This can lead to TrackID collisions!");
    }
#endif
    mUsedTracks.InsertElementSorted(id);
  } else if ((id = aTrack->GetID()) &&
             id > mNextAvailableTrackID &&
             mUsedTracks.BinaryIndexOf(id) == mUsedTracks.NoIndex) {
    // Input id available. Mark it used in mUsedTracks.
    mUsedTracks.InsertElementSorted(id);
  } else {
    // No desired destination id and Input id taken, allocate a new one.
    id = mNextAvailableTrackID;

    // Update mNextAvailableTrackID and prune any mUsedTracks members it now
    // covers.
    while (1) {
      if (!mUsedTracks.RemoveElementSorted(++mNextAvailableTrackID)) {
        // Not in use. We're done.
        break;
      }
    }
  }

  // Round up the track start time so the track, if anything, starts a
  // little later than the true time. This means we'll have enough
  // samples in our input stream to go just beyond the destination time.
  StreamTime outputStart = GraphTimeToStreamTimeWithBlocking(aFrom);

  nsAutoPtr<MediaSegment> segment;
  segment = aTrack->GetSegment()->CreateEmptyClone();
  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    l->NotifyQueuedTrackChanges(Graph(), id, outputStart,
                                TrackEventCommand::TRACK_EVENT_CREATED,
                                *segment,
                                aPort->GetSource(), aTrack->GetID());
  }
  segment->AppendNullData(outputStart);
  StreamTracks::Track* track =
    &mTracks.AddTrack(id, outputStart, segment.forget());
  STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p added track %d for input "
                               "stream %p track %d, start ticks %lld",
                               this, track->GetID(), aPort->GetSource(),
                               aTrack->GetID(), (long long)outputStart));

  TrackMapEntry* map = mTrackMap.AppendElement();
  map->mEndOfConsumedInputTicks = 0;
  map->mEndOfLastInputIntervalInInputStream = -1;
  map->mEndOfLastInputIntervalInOutputStream = -1;
  map->mInputPort = aPort;
  map->mInputTrackID = aTrack->GetID();
  map->mOutputTrackID = track->GetID();
  map->mSegment = aTrack->GetSegment()->CreateEmptyClone();

  for (int32_t i = mPendingDirectTrackListeners.Length() - 1; i >= 0; --i) {
    TrackBound<DirectMediaStreamTrackListener>& bound =
      mPendingDirectTrackListeners[i];
    if (bound.mTrackID != map->mOutputTrackID) {
      continue;
    }
    MediaStream* source = map->mInputPort->GetSource();
    map->mOwnedDirectListeners.AppendElement(bound.mListener);
    DisabledTrackMode currentMode = GetDisabledTrackMode(bound.mTrackID);
    if (currentMode != DisabledTrackMode::ENABLED) {
      bound.mListener->IncreaseDisabled(currentMode);
    }
    STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p adding direct listener "
                                 "%p for track %d. Forwarding to input "
                                 "stream %p track %d.",
                                 this, bound.mListener.get(), bound.mTrackID,
                                 source, map->mInputTrackID));
    source->AddDirectTrackListenerImpl(bound.mListener.forget(),
                                       map->mInputTrackID);
    mPendingDirectTrackListeners.RemoveElementAt(i);
  }
  return mTrackMap.Length() - 1;
}
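The final branch of the ID selection above is a small allocator: hand out mNextAvailableTrackID, then advance it past every ID already recorded in mUsedTracks. The same strategy as a self-contained sketch, with std::set standing in for the sorted nsTArray (assumed semantics):

#include <cstdint>
#include <set>

// Hedged sketch mirroring the while(1) loop above: erase() returns 1 as
// long as the incremented candidate was marked used, 0 once it is free.
int32_t AllocateTrackIdSketch(std::set<int32_t>& usedIds, int32_t& nextAvailable)
{
  int32_t id = nextAvailable;
  while (usedIds.erase(++nextAvailable)) {
    // Keep advancing past IDs the set already covered.
  }
  return id;
}

After the call, nextAvailable again points at an ID that no existing track holds.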
Example 10: MOZ_ASSERT
void TrackUnionStream::CopyTrackData(StreamTracks::Track* aInputTrack,
                                     uint32_t aMapIndex, GraphTime aFrom,
                                     GraphTime aTo, bool* aOutputTrackFinished)
{
  TrackMapEntry* map = &mTrackMap[aMapIndex];
  StreamTracks::Track* outputTrack = mTracks.FindTrack(map->mOutputTrackID);
  MOZ_ASSERT(outputTrack && !outputTrack->IsEnded(), "Can't copy to ended track");

  MediaSegment* segment = map->mSegment;
  MediaStream* source = map->mInputPort->GetSource();

  GraphTime next;
  *aOutputTrackFinished = false;
  for (GraphTime t = aFrom; t < aTo; t = next) {
    MediaInputPort::InputInterval interval =
      map->mInputPort->GetNextInputInterval(t);
    interval.mEnd = std::min(interval.mEnd, aTo);
    StreamTime inputEnd =
      source->GraphTimeToStreamTimeWithBlocking(interval.mEnd);
    StreamTime inputTrackEndPoint = STREAM_TIME_MAX;

    if (aInputTrack->IsEnded() &&
        aInputTrack->GetEnd() <= inputEnd) {
      inputTrackEndPoint = aInputTrack->GetEnd();
      *aOutputTrackFinished = true;
    }

    if (interval.mStart >= interval.mEnd) {
      break;
    }
    StreamTime ticks = interval.mEnd - interval.mStart;
    next = interval.mEnd;

    StreamTime outputStart = outputTrack->GetEnd();

    if (interval.mInputIsBlocked) {
      // Maybe the input track ended?
      segment->AppendNullData(ticks);
      STREAM_LOG(LogLevel::Verbose,
                 ("TrackUnionStream %p appending %lld ticks of null data to "
                  "track %d", this, (long long)ticks, outputTrack->GetID()));
    } else if (InMutedCycle()) {
      segment->AppendNullData(ticks);
    } else {
      if (source->IsSuspended()) {
        segment->AppendNullData(aTo - aFrom);
      } else {
        MOZ_ASSERT(outputTrack->GetEnd() ==
                     GraphTimeToStreamTimeWithBlocking(interval.mStart),
                   "Samples missing");
        StreamTime inputStart =
          source->GraphTimeToStreamTimeWithBlocking(interval.mStart);
        segment->AppendSlice(*aInputTrack->GetSegment(),
                             std::min(inputTrackEndPoint, inputStart),
                             std::min(inputTrackEndPoint, inputEnd));
      }
    }
    ApplyTrackDisabling(outputTrack->GetID(), segment);
    for (uint32_t j = 0; j < mListeners.Length(); ++j) {
      MediaStreamListener* l = mListeners[j];
      // Separate Audio and Video.
      if (segment->GetType() == MediaSegment::AUDIO) {
        l->NotifyQueuedAudioData(Graph(), outputTrack->GetID(),
                                 outputStart,
                                 *static_cast<AudioSegment*>(segment),
                                 map->mInputPort->GetSource(),
                                 map->mInputTrackID);
      }
    }
    for (TrackBound<MediaStreamTrackListener>& b : mTrackListeners) {
      if (b.mTrackID != outputTrack->GetID()) {
        continue;
      }
      b.mListener->NotifyQueuedChanges(Graph(), outputStart, *segment);
    }
    outputTrack->GetSegment()->AppendFrom(segment);
  }
}
Example 11: readDoneCallback
int
readDoneCallback( void* data, const char* cookie, size_t bufferSize, void* buffer )
{
  MediaStream* mediaStream = static_cast< MediaStream * >( data );
  return mediaStream->readDoneCallback( cookie, bufferSize, buffer );
}
Example 12: readCallback
int readCallback( void* data, const char* cookie, int64_t* dts, int64_t* pts,
                  unsigned* flags, size_t* bufferSize, void** buffer )
{
  MediaStream* mediaStream = static_cast< MediaStream * >( data );
  return mediaStream->readCallback( cookie, dts, pts, flags, bufferSize, buffer );
}
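These two free functions match the read/release callback pair of VLC's imem (in-memory input) module, where C function-pointer addresses are handed to the player as media options. A hedged wiring sketch under that assumption; the option spelling varies across libVLC versions, so treat the strings below as illustrative rather than authoritative:

#include <vlc/vlc.h>
#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Declarations of the callbacks shown in Examples 11 and 12.
int readDoneCallback(void*, const char*, size_t, void*);
int readCallback(void*, const char*, int64_t*, int64_t*, unsigned*, size_t*, void**);

// Hypothetical setup: pass the callback addresses and the MediaStream*
// user data to the imem module via media options.
static void ConfigureImemSketch(libvlc_media_t* media, void* mediaStream)
{
  char opt[64];
  std::snprintf(opt, sizeof opt, ":imem-get=%" PRIdPTR, (intptr_t)&readCallback);
  libvlc_media_add_option(media, opt);
  std::snprintf(opt, sizeof opt, ":imem-release=%" PRIdPTR, (intptr_t)&readDoneCallback);
  libvlc_media_add_option(media, opt);
  std::snprintf(opt, sizeof opt, ":imem-data=%" PRIdPTR, (intptr_t)mediaStream);
  libvlc_media_add_option(media, opt);
}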
Example 13: AdvanceOutputSegment
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  mLastChunks.SetLength(1);

  // GC stuff can result in our input stream being destroyed before this
  // stream. Handle that.
  if (mInputs.IsEmpty()) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  nsAutoTArray<AudioSegment,1> audioSegments;
  nsAutoTArray<bool,1> trackMapEntriesUsed;
  uint32_t inputChannels = 0;
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    // Create a TrackMapEntry if necessary.
    size_t trackMapIndex = GetTrackMapEntry(inputTrack, aFrom);
    // Maybe there's nothing in this track yet. If so, ignore it. (While the
    // track is only playing silence, we may not be able to determine the
    // correct number of channels to start resampling.)
    if (trackMapIndex == nsTArray<TrackMapEntry>::NoIndex) {
      continue;
    }
    while (trackMapEntriesUsed.Length() <= trackMapIndex) {
      trackMapEntriesUsed.AppendElement(false);
    }
    trackMapEntriesUsed[trackMapIndex] = true;

    TrackMapEntry* trackMap = &mTrackMap[trackMapIndex];
    AudioSegment segment;
    GraphTime next;
    TrackRate inputTrackRate = inputTrack.GetRate();
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval =
        mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      // Ticks >= startTicks and < endTicks are in the interval
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      TrackTicks startTicks =
        trackMap->mSamplesPassedToResampler + segment.GetDuration();
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      NS_ASSERTION(startTicks == TimeToTicksRoundUp(inputTrackRate, outputStart),
                   "Samples missing");
      TrackTicks endTicks = TimeToTicksRoundUp(inputTrackRate, outputEnd);
      TrackTicks ticks = endTicks - startTicks;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // See comments in TrackUnionStream::CopyTrackData
        StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
        StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
        TrackTicks inputTrackEndPoint =
          inputTrack.IsEnded() ? inputTrack.GetEnd() : TRACK_TICKS_MAX;

        if (trackMap->mEndOfLastInputIntervalInInputStream != inputStart ||
            trackMap->mEndOfLastInputIntervalInOutputStream != outputStart) {
          // Start of a new series of intervals where neither stream is blocked.
          trackMap->mEndOfConsumedInputTicks =
            TimeToTicksRoundDown(inputTrackRate, inputStart) - 1;
        }
        TrackTicks inputStartTicks = trackMap->mEndOfConsumedInputTicks;
        TrackTicks inputEndTicks = inputStartTicks + ticks;
        trackMap->mEndOfConsumedInputTicks = inputEndTicks;
        trackMap->mEndOfLastInputIntervalInInputStream = inputEnd;
        trackMap->mEndOfLastInputIntervalInOutputStream = outputEnd;

        if (inputStartTicks < 0) {
          // Data before the start of the track is just null.
          segment.AppendNullData(-inputStartTicks);
          inputStartTicks = 0;
        }
        if (inputEndTicks > inputStartTicks) {
          segment.AppendSlice(*inputTrack.GetSegment(),
                              std::min(inputTrackEndPoint, inputStartTicks),
                              std::min(inputTrackEndPoint, inputEndTicks));
        }
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - segment.GetDuration());
      }
    }

    trackMap->mSamplesPassedToResampler += segment.GetDuration();
    trackMap->ResampleInputData(&segment);

    if (trackMap->mResampledData.GetDuration() <
        mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE) {
      // We don't have enough data. Delay it.
      trackMap->mResampledData.InsertNullDataAtStart(
        mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE -
        trackMap->mResampledData.GetDuration());
//......... some code omitted here .........
Example 14: GetAudioChannelsSuperset
AudioChunk*
AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 0;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunk;
    // XXX when we implement DelayNode, this will no longer be true and we'll
    // need to treat a null chunk (when the DelayNode hasn't had a chance
    // to produce data yet) as silence here.
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0) {
    aTmpChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
    return aTmpChunk;
  }

  if (inputChunkCount == 1) {
    return inputChunks[0];
  }

  AllocateAudioBlock(outputChannelCount, aTmpChunk);

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
      NS_ASSERTION(outputChannelCount == channels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    }

    for (uint32_t c = 0; c < channels.Length(); ++c) {
      const float* inputData = static_cast<const float*>(channels[c]);
      float* outputData =
        static_cast<float*>(const_cast<void*>(aTmpChunk->mChannelData[c]));
      if (inputData) {
        if (i == 0) {
          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
        } else {
          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
        }
      } else {
        if (i == 0) {
          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
        }
      }
    }
  }

  return aTmpChunk;
}
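The inner loop above follows a copy-then-add discipline: the first contributing chunk initializes each output channel with its volume applied, later chunks accumulate on top, and a channel no input provides is zeroed only once. That per-channel rule as a minimal sketch (names are illustrative):

#include <cstddef>
#include <cstring>

// Hedged sketch of the accumulate pattern: copy-with-scale for the first
// input, add-with-scale afterwards, matching the i == 0 branches above.
void MixChannelSketch(const float* input, float gain, float* output,
                      size_t frames, bool firstInput)
{
  if (!input) {
    if (firstInput) {
      std::memset(output, 0, frames * sizeof(float)); // silence baseline
    }
    return; // later null inputs contribute nothing
  }
  for (size_t i = 0; i < frames; ++i) {
    output[i] = firstInput ? input[i] * gain : output[i] + input[i] * gain;
  }
}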
Example 15: memset
/* changed by [email protected] (see relaod.txt for info) */
int QTFileBroadcaster::Play(char *mTimeFile)
/* ***************************************************** */
{
    SInt16 err = 0;
    Float64 transmitTime = 0;
    MediaStream *theStreamPtr = NULL;
    RTpPacket rtpPacket;
    unsigned int sleptTime;
    SInt32 movieStartOffset = 0; //z
    Bool16 negativeTime = false;

    fMovieDuration = fRTPFilePtr->GetMovieDuration();
    fSendTimeOffset = 0.0;
    fMovieStart = true;
    fNumMoviesPlayed++;

    if (fMovieEndTime > 0)
    {   // Take into account the movie load time as well as the last movie's
        // early end.
        UInt64 timeNow = PlayListUtils::Milliseconds();
        fMovieIntervalTime = timeNow - fMovieEndTime;
        SInt32 earlySleepTimeMilli = (SInt32)(fMovieTimeDiffMilli - fMovieIntervalTime);
        // Don't sleep the entire time; we need some time to execute or else
        // we will be late.
        earlySleepTimeMilli -= 40;
        if (earlySleepTimeMilli > 0)
        {   OSThread::Sleep(earlySleepTimeMilli);
        }
    }

    fMovieStartTime = PlayListUtils::Milliseconds();
    fMediaStreamList.MovieStarted(fMovieStartTime);

    /* changed by [email protected] (see relaod.txt for info) */
    if (mTimeFile != NULL)
    {
        FILE *fTimeFile = NULL;
        struct timeval start, dur, end;
        struct tm tm_start, tm_dur, tm_end, timeResult;

        memset(&start, 0, sizeof(start));
        SInt64 timenow = OS::Milliseconds();
        start.tv_sec = (long) OS::TimeMilli_To_UnixTimeSecs(timenow);
        start.tv_usec = (long) ((OS::TimeMilli_To_UnixTimeMilli(timenow) - (start.tv_sec * 1000)) * 1000);
        dur.tv_sec = (long) fMovieDuration;
        dur.tv_usec = (long) ((fMovieDuration - dur.tv_sec) * 1000000);

        end.tv_sec = start.tv_sec + dur.tv_sec + (long)((start.tv_usec + dur.tv_usec) / 1000000);
        end.tv_usec = (start.tv_usec + dur.tv_usec) % 1000000;

        time_t startSecs = start.tv_sec;
        time_t endSecs = end.tv_sec;
        memcpy(&tm_start, qtss_localtime(&startSecs, &timeResult), sizeof(struct tm));
        memcpy(&tm_end, qtss_localtime(&endSecs, &timeResult), sizeof(struct tm));

        tm_dur.tm_hour = dur.tv_sec / 3600;
        tm_dur.tm_min = (dur.tv_sec % 3600) / 60;
        tm_dur.tm_sec = (dur.tv_sec % 3600) % 60;

        // Initialize all current movie parameters to unknown ("-").
        ::strcpy(fCurrentMovieName, "-");
        ::strcpy(fCurrentMovieCopyright, "-");
        ::strcpy(fCurrentMovieComment, "-");
        ::strcpy(fCurrentMovieAuthor, "-");
        ::strcpy(fCurrentMovieArtist, "-");
        ::strcpy(fCurrentMovieAlbum, "-");

        /* Save the start time, stop time and length of the currently playing
           song to the .current file. */
        fTimeFile = fopen(mTimeFile, "a");
        if (fTimeFile)
        {
            SimpleString *theQTTextPtr = fMovieSDPParser->fQTTextLines.Begin();
            while (theQTTextPtr != NULL)
            {
                char tmp[256];
                ::memcpy(tmp, theQTTextPtr->fTheString, theQTTextPtr->fLen);
                tmp[theQTTextPtr->fLen] = 0;

                // If this SDP parameter is needed for logging, cache it here
                // so we can log it later.
                if (::strstr(theQTTextPtr->fTheString, "a=x-qt-text-nam:") != NULL)
                    ::strcpy(fCurrentMovieName, &tmp[16]);
                if (::strstr(theQTTextPtr->fTheString, "a=x-qt-text-cpy:") != NULL)
                    ::strcpy(fCurrentMovieCopyright, &tmp[16]);
                if (::strstr(theQTTextPtr->fTheString, "a=x-qt-text-cmt:") != NULL)
                    ::strcpy(fCurrentMovieComment, &tmp[16]);
                if (::strstr(theQTTextPtr->fTheString, "a=x-qt-text-aut:") != NULL)
                    ::strcpy(fCurrentMovieAuthor, &tmp[16]);
                if (::strstr(theQTTextPtr->fTheString, "a=x-qt-text-ART:") != NULL)
                    ::strcpy(fCurrentMovieArtist, &tmp[16]);
                if (::strstr(theQTTextPtr->fTheString, "a=x-qt-text-alb:") != NULL)
                    ::strcpy(fCurrentMovieAlbum, &tmp[16]);

                fwrite(theQTTextPtr->fTheString, theQTTextPtr->fLen, sizeof(char), fTimeFile);
                qtss_fprintf(fTimeFile, "\n");
                theQTTextPtr = fMovieSDPParser->fQTTextLines.Next();
            }

            time_t startTime = (time_t) start.tv_sec;
            time_t endTime = (time_t) end.tv_sec;
            char buffer[kTimeStrSize];
            char *timestringStart = qtss_ctime(&startTime, buffer, sizeof(buffer));
            qtss_fprintf(fTimeFile, "b=%02d:%02d:%02d:%06d %ld %s",
                         (int) tm_start.tm_hour, (int) tm_start.tm_min,
                         (int) tm_start.tm_sec, (int) start.tv_usec,
                         (long int) startTime, timestringStart);
//......... some code omitted here .........