

C++ SourceMediaStream::AppendToTrack Method Code Examples

This article collects typical usage examples of the C++ method SourceMediaStream::AppendToTrack. If you are wondering what SourceMediaStream::AppendToTrack does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore other usage examples of the SourceMediaStream class that this method belongs to.


The following presents 6 code examples of SourceMediaStream::AppendToTrack, sorted by popularity by default. You can upvote the examples you find helpful; your votes help the system recommend better C++ code examples.
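Before diving into the examples, here is a minimal sketch of the call pattern the audio examples all share: copy the incoming samples into a ref-counted SharedBuffer, wrap the buffer in an AudioSegment, and hand the segment to AppendToTrack. The helper name AppendAudioChunk and its parameter list are illustrative assumptions pieced together from the examples below, not part of the Gecko API.

// A minimal sketch of the shared pattern, assuming the Gecko media types
// (SharedBuffer, AudioSegment, SourceMediaStream) used in the examples below.
// The helper name |AppendAudioChunk| is hypothetical.
static void
AppendAudioChunk(SourceMediaStream* aSource, TrackID aTrackID,
                 const sample* aAudio10ms, int aLength)
{
  // Copy the input into a ref-counted buffer the segment can take ownership of.
  nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(aLength * sizeof(sample));
  sample* dest = static_cast<sample*>(buffer->Data());
  memcpy(dest, aAudio10ms, aLength * sizeof(sample));

  // Wrap the buffer in a mono AudioSegment.
  AudioSegment segment;
  nsAutoTArray<const sample*,1> channels;
  channels.AppendElement(dest);
  segment.AppendFrames(buffer.forget(), channels, aLength);

  // AppendToTrack is safe from any thread, and safe even if the track has
  // been Finished or Destroyed (see examples 1, 4 and 6).
  if (aSource) {
    aSource->AppendToTrack(aTrackID, &segment);
  }
}

The video examples (2 and 5) follow the same shape, with a VideoSegment and AppendFrame in place of the AudioSegment.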

Example 1: enter

void
MediaEngineWebRTCAudioSource::Process(const int channel,
  const webrtc::ProcessingTypes type, sample* audio10ms,
  const int length, const int samplingFreq, const bool isStereo)
{
  ReentrantMonitorAutoEnter enter(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));

    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      source->AppendToTrack(mTrackID, &segment);
    }
  }
}
Developer ID: longfeey, Project: mozilla-central, Lines of code: 31, Source file: MediaEngineWebRTCAudio.cpp

Example 2: VideoSegment

void
DOMHwMediaStream::Init(MediaStream* stream)
{
  SourceMediaStream* srcStream = stream->AsSourceStream();

  if (srcStream) {
    VideoSegment segment;
#ifdef MOZ_WIDGET_GONK
    const StreamTime delta = STREAM_TIME_MAX; // Because the MediaStreamGraph will run out of frames in
                                              // non-autoplay mode, we must give the frame a large enough
                                              // duration to cover this situation.
    mImageData.mOverlayId = DEFAULT_IMAGE_ID;
    mImageData.mSize.width = DEFAULT_IMAGE_WIDTH;
    mImageData.mSize.height = DEFAULT_IMAGE_HEIGHT;
    mOverlayImage->SetData(mImageData);

    RefPtr<Image> image = static_cast<Image*>(mOverlayImage.get());
    mozilla::gfx::IntSize size = image->GetSize();

    segment.AppendFrame(image.forget(), delta, size);
#endif
    srcStream->AddTrack(TRACK_VIDEO_PRIMARY, 0, new VideoSegment());
    srcStream->AppendToTrack(TRACK_VIDEO_PRIMARY, &segment);
    srcStream->FinishAddTracks();
    srcStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
  }
}
Developer ID: jasonmsoft, Project: system-addons, Lines of code: 26, Source file: DOMMediaStream.cpp

Example 3: AssertOwnerThread

void
DecodedStream::SendAudio(double aVolume, bool aIsSameOrigin,
                         const PrincipalHandle& aPrincipalHandle)
{
  AssertOwnerThread();

  if (!mInfo.HasAudio()) {
    return;
  }

  AudioSegment output;
  uint32_t rate = mInfo.mAudio.mRate;
  AutoTArray<RefPtr<MediaData>,10> audio;
  TrackID audioTrackId = mInfo.mAudio.mTrackId;
  SourceMediaStream* sourceStream = mData->mStream;

  // It's OK to hold references to the AudioData because AudioData
  // is ref-counted.
  mAudioQueue.GetElementsAfter(mData->mNextAudioTime, &audio);
  for (uint32_t i = 0; i < audio.Length(); ++i) {
    SendStreamAudio(mData.get(), mStartTime.ref(), audio[i], &output, rate,
                    aPrincipalHandle);
  }

  output.ApplyVolume(aVolume);

  if (!aIsSameOrigin) {
    output.ReplaceWithDisabled();
  }

  // |mNextAudioTime| is updated as we process each audio sample in
  // SendStreamAudio(). This is consistent with how |mNextVideoTime|
  // is updated for video samples.
  if (output.GetDuration() > 0) {
    sourceStream->AppendToTrack(audioTrackId, &output);
  }

  if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
    sourceStream->EndTrack(audioTrackId);
    mData->mHaveSentFinishAudio = true;
  }
}
Developer ID: , Project: , Lines of code: 42, Source file:

Example 4: lock

void
MediaEngineWebRTCAudioSource::Process(int channel,
  webrtc::ProcessingTypes type, sample* audio10ms,
  int length, int samplingFreq, bool isStereo)
{
  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));

    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);
    TimeStamp insertTime;
    segment.GetStartTime(insertTime);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Make sure we include both the stream and the track in the log.
      // The 0/1 flag notes when we've done the final insert for a given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion, LATENCY_STREAM_ID(source, mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);

      source->AppendToTrack(mTrackID, &segment);
    }
  }
}
Developer ID: MYSHLIFE, Project: gecko-dev, Lines of code: 38, Source file: MediaEngineWebRTCAudio.cpp

Example 5: IntSize

void
DOMHwMediaStream::SetImageSize(uint32_t width, uint32_t height)
{
#ifdef MOZ_WIDGET_GONK
  OverlayImage::Data imgData;

  imgData.mOverlayId = mOverlayImage->GetOverlayId();
  imgData.mSize = IntSize(width, height);
  mOverlayImage->SetData(imgData);
#endif

  SourceMediaStream* srcStream = GetInputStream()->AsSourceStream();
  StreamBuffer::Track* track = srcStream->FindTrack(TRACK_VIDEO_PRIMARY);

  if (!track || !track->GetSegment()) {
    return;
  }

#ifdef MOZ_WIDGET_GONK
  // Clear the old segment.
  // Changing the existing contents of a segment is normally a very bad idea
  // and will confuse consumers of MediaStreams. It is acceptable here only
  // because DOMHwMediaStream currently has no consumers of TV streams.
  track->GetSegment()->Clear();

  // Change the image size.
  const StreamTime delta = STREAM_TIME_MAX;
  RefPtr<Image> image = static_cast<Image*>(mOverlayImage.get());
  mozilla::gfx::IntSize size = image->GetSize();
  VideoSegment segment;

  segment.AppendFrame(image.forget(), delta, size);
  srcStream->AppendToTrack(TRACK_VIDEO_PRIMARY, &segment);
#endif
}
Developer ID: jasonmsoft, Project: system-addons, Lines of code: 36, Source file: DOMMediaStream.cpp

Example 6: lock

void
MediaEngineWebRTCAudioSource::Process(int channel,
  webrtc::ProcessingTypes type, sample* audio10ms,
  int length, int samplingFreq, bool isStereo)
{
  // On initial capture, throw away all far-end data except the most recent
  // sample, since it's already irrelevant and we want to avoid confusing the
  // AEC far-end input code with "old" audio.
  if (!mStarted) {
    mStarted = true;
    while (gFarendObserver->Size() > 1) {
      FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
      free(buffer);
    }
  }

  while (gFarendObserver->Size() > 0) {
    FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
    if (buffer) {
      // Use a distinct name so we don't shadow the |length| parameter.
      int farendLength = buffer->mSamples;
      if (mVoERender->ExternalPlayoutData(buffer->mData,
                                          gFarendObserver->PlayoutFrequency(),
                                          gFarendObserver->PlayoutChannels(),
                                          mPlayoutDelay,
                                          farendLength) == -1) {
        return;
      }
    }
    free(buffer);
  }

#ifdef PR_LOGGING
  mSamples += length;
  if (mSamples > samplingFreq) {
    mSamples %= samplingFreq; // just in case mSamples >> samplingFreq
    if (PR_LOG_TEST(GetMediaManagerLog(), PR_LOG_DEBUG)) {
      webrtc::EchoStatistics echo;

      mVoECallReport->GetEchoMetricSummary(echo);
#define DUMP_STATVAL(x) (x).min, (x).max, (x).average
      LOG(("Echo: ERL: %d/%d/%d, ERLE: %d/%d/%d, RERL: %d/%d/%d, NLP: %d/%d/%d",
           DUMP_STATVAL(echo.erl),
           DUMP_STATVAL(echo.erle),
           DUMP_STATVAL(echo.rerl),
           DUMP_STATVAL(echo.a_nlp)));
    }
  }
#endif

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));

    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);
    TimeStamp insertTime;
    segment.GetStartTime(insertTime);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Make sure we include both the stream and the track in the log.
      // The 0/1 flag notes when we've done the final insert for a given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion, LATENCY_STREAM_ID(source, mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);

      source->AppendToTrack(mTrackID, &segment);
    }
  }
}
Developer ID: JuannyWang, Project: gecko-dev, Lines of code: 82, Source file: MediaEngineWebRTCAudio.cpp


Note: The SourceMediaStream::AppendToTrack method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not republish without permission.