

C++ CheckedInt64::value Method Code Examples

This article collects typical usage examples of the C++ CheckedInt64::value method. If you are wondering what CheckedInt64::value does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the containing class, CheckedInt64.


The following presents 15 code examples of the CheckedInt64::value method, sorted by popularity.
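
Before diving into the examples, here is a minimal standalone sketch of the API these snippets rely on (assuming mozilla/CheckedInt.h from MFBT, where CheckedInt64 is a typedef for CheckedInt<int64_t>): arithmetic on a CheckedInt64 silently tracks overflow, isValid() reports whether every step stayed in range, and value() returns the underlying integer and should only be called after a successful isValid() check.

#include "mozilla/CheckedInt.h" // MFBT; CheckedInt64 = CheckedInt<int64_t>
#include <cstdint>
#include <cstdio>

using mozilla::CheckedInt64;

int main() {
  // A multiplication that overflows marks the whole chain invalid.
  CheckedInt64 frames = CheckedInt64(INT64_MAX) * 48000;
  if (!frames.isValid()) {
    std::puts("overflow detected; value() must not be called");
  }

  // Normal arithmetic stays valid, and value() unwraps the result.
  CheckedInt64 duration = CheckedInt64(90000) - 30000;
  if (duration.isValid()) {
    std::printf("duration = %lld\n", static_cast<long long>(duration.value()));
  }
  return 0;
}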

Example 1: UsecsToFrames

bool
DecodedAudioDataSink::PlayAudio()
{
  // See if there's a gap in the audio. If there is, push silence into the
  // audio hardware, so we can play across the gap.
  // Calculate the timestamp of the next chunk of audio in numbers of
  // samples.
  NS_ASSERTION(AudioQueue().GetSize() > 0, "Should have data to play");
  CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);

  // Calculate the number of frames that have been pushed onto the audio hardware.
  CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) +
                              static_cast<int64_t>(mWritten);

  CheckedInt64 missingFrames = sampleTime - playedFrames;
  if (!missingFrames.isValid() || !sampleTime.isValid()) {
    NS_WARNING("Int overflow adding in AudioLoop");
    return false;
  }

  if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
    // The next audio chunk begins some time after the end of the last chunk
    // we pushed to the audio hardware. We must push silence into the audio
    // hardware so that the next audio chunk begins playback at the correct
    // time.
    missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
    mWritten += PlaySilence(static_cast<uint32_t>(missingFrames.value()));
  } else {
    mWritten += PlayFromAudioQueue();
  }

  return true;
}
Developer: norihirou, Project: gecko-dev, Lines: 33, Source: DecodedAudioDataSink.cpp
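
The UsecsToFrames helper used above is defined elsewhere in the tree; as a rough sketch of its likely shape (with USECS_PER_S being 1,000,000, and the exact implementation an assumption), it returns a CheckedInt64 so that an overflow anywhere in the conversion propagates into the caller's single isValid() check:

// Sketch of the time/frame conversion helpers, assuming
// USECS_PER_S == 1000000. Returning CheckedInt64 lets callers
// chain further arithmetic and validate once at the end.
static const int64_t USECS_PER_S = 1000000;

CheckedInt64 UsecsToFrames(int64_t aUsecs, uint32_t aRate) {
  return (CheckedInt64(aUsecs) * aRate) / USECS_PER_S;
}

CheckedInt64 FramesToUsecs(int64_t aFrames, uint32_t aRate) {
  return (CheckedInt64(aFrames) * USECS_PER_S) / aRate;
}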

Example 2: if

// Makes sure that aStart and aEnd are less than or equal to aSize and
// greater than or equal to 0.
static void
ParseSize(int64_t aSize, int64_t& aStart, int64_t& aEnd)
{
  CheckedInt64 newStartOffset = aStart;
  if (aStart < -aSize) {
    newStartOffset = 0;
  }
  else if (aStart < 0) {
    newStartOffset += aSize;
  }
  else if (aStart > aSize) {
    newStartOffset = aSize;
  }

  CheckedInt64 newEndOffset = aEnd;
  if (aEnd < -aSize) {
    newEndOffset = 0;
  }
  else if (aEnd < 0) {
    newEndOffset += aSize;
  }
  else if (aEnd > aSize) {
    newEndOffset = aSize;
  }

  if (!newStartOffset.isValid() || !newEndOffset.isValid() ||
      newStartOffset.value() >= newEndOffset.value()) {
    aStart = aEnd = 0;
  }
  else {
    aStart = newStartOffset.value();
    aEnd = newEndOffset.value();
  }
}
Developer: AtulKumar2, Project: gecko-dev, Lines: 36, Source: File.cpp
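
To make the clamping concrete, here is a hypothetical call sequence (the semantics mirror Blob.slice, where negative offsets count back from the end):

int64_t start = -10, end = -2;      // like blob.slice(-10, -2) on 100 bytes
ParseSize(100, start, end);         // start == 90, end == 98

int64_t badStart = 50, badEnd = 20; // start >= end collapses the range
ParseSize(100, badStart, badEnd);   // badStart == 0, badEnd == 0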

Example 3: mon

void
AudioSink::AudioLoop()
{
  AssertOnAudioThread();
  SINK_LOG("AudioLoop started");

  if (NS_FAILED(InitializeAudioStream())) {
    NS_WARNING("Initializing AudioStream failed.");
    mStateMachine->DispatchOnAudioSinkError();
    return;
  }

  while (1) {
    {
      ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
      WaitForAudioToPlay();
      if (!IsPlaybackContinuing()) {
        break;
      }
    }
    // See if there's a gap in the audio. If there is, push silence into the
    // audio hardware, so we can play across the gap.
    // Calculate the timestamp of the next chunk of audio in numbers of
    // samples.
    NS_ASSERTION(AudioQueue().GetSize() > 0, "Should have data to play");
    CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);

    // Calculate the number of frames that have been pushed onto the audio hardware.
    CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) + mWritten;

    CheckedInt64 missingFrames = sampleTime - playedFrames;
    if (!missingFrames.isValid() || !sampleTime.isValid()) {
      NS_WARNING("Int overflow adding in AudioLoop");
      break;
    }

    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
      // The next audio chunk begins some time after the end of the last chunk
      // we pushed to the audio hardware. We must push silence into the audio
      // hardware so that the next audio chunk begins playback at the correct
      // time.
      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
      mWritten += PlaySilence(static_cast<uint32_t>(missingFrames.value()));
    } else {
      mWritten += PlayFromAudioQueue();
    }
    int64_t endTime = GetEndTime();
    if (endTime != -1) {
      mOnAudioEndTimeUpdateTask->Dispatch(endTime);
    }
  }
  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
  MOZ_ASSERT(mStopAudioThread || AudioQueue().AtEndOfStream());
  if (!mStopAudioThread && mPlaying) {
    Drain();
  }
  SINK_LOG("AudioLoop complete");
  Cleanup();
  SINK_LOG("AudioLoop exit");
}
Developer: giota-cliqz, Project: browser-f, Lines: 60, Source: AudioSink.cpp

Example 4: UsecsToFrames

static void
SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
                MediaData* aData, AudioSegment* aOutput,
                uint32_t aRate, double aVolume)
{
  MOZ_ASSERT(aData);
  AudioData* audio = aData->As<AudioData>();
  // This logic has to mimic AudioSink closely to make sure we write
  // the exact same silences
  CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
                                    UsecsToFrames(aStartTime, aRate);
  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);

  if (!audioWrittenOffset.isValid() ||
      !frameOffset.isValid() ||
      // ignore packet that we've already processed
      frameOffset.value() + audio->mFrames <= audioWrittenOffset.value()) {
    return;
  }

  if (audioWrittenOffset.value() < frameOffset.value()) {
    int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
    // Write silence to catch up
    AudioSegment silence;
    silence.InsertNullDataAtStart(silentFrames);
    aStream->mAudioFramesWritten += silentFrames;
    audioWrittenOffset += silentFrames;
    aOutput->AppendFrom(&silence);
  }

  MOZ_ASSERT(audioWrittenOffset.value() >= frameOffset.value());

  int64_t offset = audioWrittenOffset.value() - frameOffset.value();
  size_t framesToWrite = audio->mFrames - offset;

  audio->EnsureAudioBuffer();
  nsRefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
  AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
  nsAutoTArray<const AudioDataValue*, 2> channels;
  for (uint32_t i = 0; i < audio->mChannels; ++i) {
    channels.AppendElement(bufferData + i * audio->mFrames + offset);
  }
  aOutput->AppendFrames(buffer.forget(), channels, framesToWrite);
  aStream->mAudioFramesWritten += framesToWrite;
  aOutput->ApplyVolume(aVolume);

  aStream->mNextAudioTime = audio->GetEndTime();
}
Developer: ibheem, Project: gecko-dev, Lines: 48, Source: DecodedStream.cpp

Example 5: DecodeAudioData

bool MediaPluginReader::DecodeAudioData()
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  // This is the approximate byte position in the stream.
  int64_t pos = mDecoder->GetResource()->Tell();

  // Read next frame
  MPAPI::AudioFrame frame;
  if (!mPlugin->ReadAudio(mPlugin, &frame, mAudioSeekTimeUs)) {
    return false;
  }
  mAudioSeekTimeUs = -1;

  // Ignore empty buffers which stagefright media read will sporadically return
  if (frame.mSize == 0)
    return true;

  nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[frame.mSize/2]);
  memcpy(buffer.get(), frame.mData, frame.mSize);

  uint32_t frames = frame.mSize / (2 * frame.mAudioChannels);
  CheckedInt64 duration = FramesToUsecs(frames, frame.mAudioSampleRate);
  if (!duration.isValid()) {
    return false;
  }

  mAudioQueue.Push(new AudioData(pos,
                                 frame.mTimeUs,
                                 duration.value(),
                                 frames,
                                 buffer.forget(),
                                 frame.mAudioChannels));
  return true;
}
Developer: ehsan, Project: gecko-dev, Lines: 35, Source: MediaPluginReader.cpp

Example 6: GetDuration

nsresult SkeletonState::GetDuration(const nsTArray<uint32_t>& aTracks,
                                      int64_t& aDuration)
{
  if (!mActive ||
      mVersion < SKELETON_VERSION(4,0) ||
      !HasIndex() ||
      aTracks.Length() == 0)
  {
    return NS_ERROR_FAILURE;
  }
  int64_t endTime = INT64_MIN;
  int64_t startTime = INT64_MAX;
  for (uint32_t i=0; i<aTracks.Length(); i++) {
    nsKeyFrameIndex* index = nullptr;
    mIndex.Get(aTracks[i], &index);
    if (!index) {
      // Can't get the timestamps for one of the required tracks, fail.
      return NS_ERROR_FAILURE;
    }
    if (index->mEndTime > endTime) {
      endTime = index->mEndTime;
    }
    if (index->mStartTime < startTime) {
      startTime = index->mStartTime;
    }
  }
  NS_ASSERTION(endTime > startTime, "Duration must be positive");
  CheckedInt64 duration = CheckedInt64(endTime) - startTime;
  aDuration = duration.isValid() ? duration.value() : 0;
  return duration.isValid() ? NS_OK : NS_ERROR_FAILURE;
}
Developer: ConradIrwin, Project: gecko-dev, Lines: 31, Source: OggCodecState.cpp

Example 7: Time

int64_t OpusState::Time(int aPreSkip, int64_t aGranulepos)
{
  if (aGranulepos < 0)
    return -1;

  // Ogg Opus always runs at a granule rate of 48 kHz.
  CheckedInt64 t = CheckedInt64(aGranulepos - aPreSkip) * USECS_PER_S;
  return t.isValid() ? t.value() / 48000 : -1;
}
Developer: ConradIrwin, Project: gecko-dev, Lines: 9, Source: OggCodecState.cpp
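
As a worked example (numbers illustrative): with aPreSkip = 312 samples and aGranulepos = 48312, t = (48312 - 312) * 1000000 = 48000000000, which is valid, so the method returns 48000000000 / 48000 = 1000000 microseconds, i.e. exactly one second.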

Example 8: StartTime

int64_t TheoraState::StartTime(int64_t granulepos) {
  if (granulepos < 0 || !mActive || mInfo.fps_numerator == 0) {
    return -1;
  }
  CheckedInt64 t = (CheckedInt64(th_granule_frame(mCtx, granulepos)) * USECS_PER_S) * mInfo.fps_denominator;
  if (!t.isValid())
    return -1;
  return t.value() / mInfo.fps_numerator;
}
Developer: ConradIrwin, Project: gecko-dev, Lines: 9, Source: OggCodecState.cpp

Example 9: FramesToUsecs

int64_t
AudioSink::GetEndTime() const
{
  CheckedInt64 playedUsecs = FramesToUsecs(mWritten, mInfo.mRate) + mStartTime;
  if (!playedUsecs.isValid()) {
    NS_WARNING("Int overflow calculating audio end time");
    return -1;
  }
  return playedUsecs.value();
}
Developer: darchons, Project: gecko-dev, Lines: 10, Source: AudioSink.cpp

Example 10: ReleaseAudioBuffer

nsresult
GonkAudioDecoderManager::CreateAudioData(int64_t aStreamOffset, AudioData **v) {
  if (!(mAudioBuffer != nullptr && mAudioBuffer->data() != nullptr)) {
    GADM_LOG("Audio Buffer is not valid!");
    return NS_ERROR_UNEXPECTED;
  }

  int64_t timeUs;
  if (!mAudioBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
    return NS_ERROR_UNEXPECTED;
  }

  if (mAudioBuffer->range_length() == 0) {
    // Some decoders may return spurious empty buffers that we just want to ignore
    // quoted from Android's AwesomePlayer.cpp
    ReleaseAudioBuffer();
    return NS_ERROR_NOT_AVAILABLE;
  }

  if (mLastDecodedTime > timeUs) {
    ReleaseAudioBuffer();
    GADM_LOG("Output decoded sample time is revert. time=%lld", timeUs);
    MOZ_ASSERT(false);
    return NS_ERROR_NOT_AVAILABLE;
  }
  mLastDecodedTime = timeUs;

  const uint8_t *data = static_cast<const uint8_t*>(mAudioBuffer->data());
  size_t dataOffset = mAudioBuffer->range_offset();
  size_t size = mAudioBuffer->range_length();

  nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[size/2]);
  memcpy(buffer.get(), data+dataOffset, size);
  uint32_t frames = size / (2 * mAudioChannels);

  CheckedInt64 duration = FramesToUsecs(frames, mAudioRate);
  if (!duration.isValid()) {
    return NS_ERROR_UNEXPECTED;
  }
  nsRefPtr<AudioData> audioData = new AudioData(aStreamOffset,
                                                timeUs,
                                                duration.value(),
                                                frames,
                                                buffer.forget(),
                                                mAudioChannels,
                                                mAudioRate);
  ReleaseAudioBuffer();
  audioData.forget(v);
  return NS_OK;
}
Developer: rhelmer, Project: gecko-dev, Lines: 50, Source: GonkAudioDecoderManager.cpp

Example 11: Create

MediaData* Create(const media::TimeUnit& aDTS,
                  const media::TimeUnit& aDuration,
                  int64_t aOffsetInStream)
{
  // Convert duration to frames. We add 1 to duration to account for
  // rounding errors, so we get a consistent tone.
  CheckedInt64 frames =
    UsecsToFrames(aDuration.ToMicroseconds()+1, mSampleRate);
  if (!frames.isValid() ||
      !mChannelCount ||
      !mSampleRate ||
      frames.value() > (UINT32_MAX / mChannelCount)) {
    return nullptr;
  }
  AlignedAudioBuffer samples(frames.value() * mChannelCount);
  if (!samples) {
    return nullptr;
  }
  // Fill the sound buffer with an A4 tone.
  static const float pi = 3.14159265f;
  static const float noteHz = 440.0f;
  for (int i = 0; i < frames.value(); i++) {
    float f = sin(2 * pi * noteHz * mFrameSum / mSampleRate);
    for (unsigned c = 0; c < mChannelCount; c++) {
      samples[i * mChannelCount + c] = AudioDataValue(f);
    }
    mFrameSum++;
  }
  return new AudioData(aOffsetInStream,
                       aDTS.ToMicroseconds(),
                       aDuration.ToMicroseconds(),
                       uint32_t(frames.value()),
                       Move(samples),
                       mChannelCount,
                       mSampleRate);
}
Developer: zbraniecki, Project: gecko-dev, Lines: 36, Source: BlankDecoderModule.cpp

Example 12:

PRInt64
nsTheoraState::MaxKeyframeOffset()
{
  // Determine the maximum time in microseconds by which a key frame could
  // offset for the theora bitstream. Theora granulepos encode time as:
  // ((key_frame_number << granule_shift) + frame_offset).
  // Therefore the maximum possible time by which any frame could be offset
  // from a keyframe is the duration of (1 << granule_shift) - 1) frames.
  PRInt64 frameDuration;
  
  // Max number of frames keyframe could possibly be offset.
  PRInt64 keyframeDiff = (1 << mInfo.keyframe_granule_shift) - 1;

  // Length of frame in usecs.
  CheckedInt64 d = CheckedInt64(mInfo.fps_denominator) * USECS_PER_S;
  if (!d.isValid())
    d = 0;
  frameDuration = d.value() / mInfo.fps_numerator;

  // Total time in usecs keyframe can be offset from any given frame.
  return frameDuration * keyframeDiff;
}
Developer: Anachid, Project: mozilla-central, Lines: 22, Source: nsOggCodecState.cpp
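
As a worked example (values illustrative): for a 29.97 fps stream, fps_numerator/fps_denominator = 30000/1001, so frameDuration = 1001 * 1000000 / 30000 = 33366 microseconds; with a keyframe_granule_shift of 6, keyframeDiff = 63, giving a maximum keyframe offset of 33366 * 63 = 2102058 microseconds, about 2.1 seconds.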

Example 13: GetPosition

bool
WaveReader::LoadAllChunks(nsAutoPtr<dom::HTMLMediaElement::MetadataTags> &aTags)
{
  // Chunks are always word (two byte) aligned.
  MOZ_ASSERT(mDecoder->GetResource()->Tell() % 2 == 0,
             "LoadAllChunks called with unaligned resource");

  bool loadFormatChunk = false;
  bool findDataOffset = false;

  for (;;) {
    static const unsigned int CHUNK_HEADER_SIZE = 8;
    char chunkHeader[CHUNK_HEADER_SIZE];
    const char* p = chunkHeader;

    if (!ReadAll(chunkHeader, sizeof(chunkHeader))) {
      return false;
    }

    static_assert(sizeof(uint32_t) * 2 <= CHUNK_HEADER_SIZE,
                  "Reads would overflow chunkHeader buffer.");

    uint32_t magic = ReadUint32BE(&p);
    uint32_t chunkSize = ReadUint32LE(&p);
    int64_t chunkStart = GetPosition();

    switch (magic) {
      case FRMT_CHUNK_MAGIC:
        loadFormatChunk = LoadFormatChunk(chunkSize);
        if (!loadFormatChunk) {
          return false;
        }
        break;

      case LIST_CHUNK_MAGIC:
        if (!aTags) {
          LoadListChunk(chunkSize, aTags);
        }
        break;

      case DATA_CHUNK_MAGIC:
        findDataOffset = FindDataOffset(chunkSize);
        return loadFormatChunk && findDataOffset;

      default:
        break;
    }

    // RIFF chunks are two-byte aligned, so round up if necessary.
    chunkSize += chunkSize % 2;

    // Move forward to next chunk
    CheckedInt64 forward = CheckedInt64(chunkStart) + chunkSize - GetPosition();

    if (!forward.isValid() || forward.value() < 0) {
      return false;
    }

    static const int64_t MAX_CHUNK_SIZE = 1 << 16;
    static_assert(uint64_t(MAX_CHUNK_SIZE) < UINT_MAX / sizeof(char),
                  "MAX_CHUNK_SIZE too large for enumerator.");
    nsAutoArrayPtr<char> chunk(new char[MAX_CHUNK_SIZE]);
    while (forward.value() > 0) {
      int64_t size = std::min(forward.value(), MAX_CHUNK_SIZE);
      if (!ReadAll(chunk.get(), size)) {
        return false;
      }
      forward -= size;
    }
  }

  return false;
}
Developer: lgarner, Project: mozilla-central, Lines: 73, Source: WaveReader.cpp
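
Note the alignment step: chunkSize += chunkSize % 2 rounds odd chunk sizes up to the next even byte, so for example a 13-byte chunk is skipped as 14 bytes, matching RIFF's rule that chunks are two-byte aligned.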

Example 14: silenceBuffer

void
AudioSink::NotifyAudioNeeded()
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn(),
             "Not called from the owner's thread");

  // Always ensure we have two processed frames pending to allow for processing
  // latency.
  while (mAudioQueue.GetSize() && (mAudioQueue.IsFinished() ||
                                    mProcessedQueueLength < LOW_AUDIO_USECS ||
                                    mProcessedQueue.GetSize() < 2)) {
    RefPtr<AudioData> data = mAudioQueue.PopFront();

    // Ignore the element with 0 frames and try next.
    if (!data->mFrames) {
      continue;
    }

    if (!mConverter ||
        (data->mRate != mConverter->InputConfig().Rate() ||
         data->mChannels != mConverter->InputConfig().Channels())) {
      SINK_LOG_V("Audio format changed from %[email protected]%uHz to %[email protected]%uHz",
                 mConverter? mConverter->InputConfig().Channels() : 0,
                 mConverter ? mConverter->InputConfig().Rate() : 0,
                 data->mChannels, data->mRate);

      DrainConverter();

      // mFramesParsed indicates the current playtime in frames at the current
      // input sampling rate. Recalculate it per the new sampling rate.
      if (mFramesParsed) {
        // We minimize overflow.
        uint32_t oldRate = mConverter->InputConfig().Rate();
        uint32_t newRate = data->mRate;
        CheckedInt64 result = SaferMultDiv(mFramesParsed, newRate, oldRate);
        if (!result.isValid()) {
          NS_WARNING("Int overflow in AudioSink");
          mErrored = true;
          return;
        }
        mFramesParsed = result.value();
      }

      mConverter =
        MakeUnique<AudioConverter>(
          AudioConfig(data->mChannels, data->mRate),
          AudioConfig(mOutputChannels, mOutputRate));
    }

    // See if there's a gap in the audio. If there is, push silence into the
    // audio hardware, so we can play across the gap.
    // Calculate the timestamp of the next chunk of audio in numbers of
    // samples.
    CheckedInt64 sampleTime =
      TimeUnitToFrames(data->mTime - mStartTime, data->mRate);
    // Calculate the number of frames that have been pushed onto the audio hardware.
    CheckedInt64 missingFrames = sampleTime - mFramesParsed;

    if (!missingFrames.isValid()) {
      NS_WARNING("Int overflow in AudioSink");
      mErrored = true;
      return;
    }

    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
      // The next audio packet begins some time after the end of the last packet
      // we pushed to the audio hardware. We must push silence into the audio
      // hardware so that the next audio packet begins playback at the correct
      // time.
      missingFrames = std::min<int64_t>(INT32_MAX, missingFrames.value());
      mFramesParsed += missingFrames.value();

      RefPtr<AudioData> silenceData;
      AlignedAudioBuffer silenceBuffer(missingFrames.value() * data->mChannels);
      if (!silenceBuffer) {
        NS_WARNING("OOM in AudioSink");
        mErrored = true;
        return;
      }
      if (mConverter->InputConfig() != mConverter->OutputConfig()) {
        AlignedAudioBuffer convertedData =
          mConverter->Process(AudioSampleBuffer(Move(silenceBuffer))).Forget();
        silenceData = CreateAudioFromBuffer(Move(convertedData), data);
      } else {
        silenceData = CreateAudioFromBuffer(Move(silenceBuffer), data);
      }
      PushProcessedAudio(silenceData);
    }

    mLastEndTime = data->GetEndTime();
    mFramesParsed += data->mFrames;

    if (mConverter->InputConfig() != mConverter->OutputConfig()) {
      // We must ensure that the size in the buffer contains exactly the number
      // of frames, in case one of the audio producer over allocated the buffer.
      AlignedAudioBuffer buffer(Move(data->mAudioData));
      buffer.SetLength(size_t(data->mFrames) * data->mChannels);

      AlignedAudioBuffer convertedData =
        mConverter->Process(AudioSampleBuffer(Move(buffer))).Forget();
//......... remainder of the code omitted .........
Developer: luke-chang, Project: gecko-1, Lines: 101, Source: AudioSink.cpp
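
SaferMultDiv is not shown in this snippet; here is a sketch of its intent (the split below minimizes intermediate overflow by dividing first, so only the remainder term needs a full-width multiply; treat the exact implementation as an assumption):

// Computes aValue * aMul / aDiv while minimizing the chance that the
// intermediate product overflows: split aValue by aDiv first, so only
// (remainder * aMul) requires a full 64-bit multiply.
static CheckedInt64 SaferMultDiv(int64_t aValue, uint32_t aMul, uint32_t aDiv) {
  int64_t major = aValue / aDiv;
  int64_t remainder = aValue % aDiv;
  return CheckedInt64(remainder) * aMul / aDiv + CheckedInt64(major) * aMul;
}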

Example 15: MediaResult

MediaResult
VorbisDataDecoder::DoDecode(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  const unsigned char* aData = aSample->Data();
  size_t aLength = aSample->Size();
  int64_t aOffset = aSample->mOffset;
  uint64_t aTstampUsecs = aSample->mTime;
  int64_t aTotalFrames = 0;

  MOZ_ASSERT(mPacketCount >= 3);

  if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
    // We are starting a new block.
    mFrames = 0;
    mLastFrameTime = Some(aSample->mTime);
  }

  ogg_packet pkt = InitVorbisPacket(aData, aLength, false, aSample->mEOS,
                                    aSample->mTimecode, mPacketCount++);

  int err = vorbis_synthesis(&mVorbisBlock, &pkt);
  if (err) {
    return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                       RESULT_DETAIL("vorbis_synthesis:%d", err));
  }

  err = vorbis_synthesis_blockin(&mVorbisDsp, &mVorbisBlock);
  if (err) {
    return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                       RESULT_DETAIL("vorbis_synthesis_blockin:%d", err));
  }

  VorbisPCMValue** pcm = 0;
  int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  if (frames == 0) {
    return NS_OK;
  }
  while (frames > 0) {
    uint32_t channels = mVorbisDsp.vi->channels;
    uint32_t rate = mVorbisDsp.vi->rate;
    AlignedAudioBuffer buffer(frames*channels);
    if (!buffer) {
      return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
    }
    for (uint32_t j = 0; j < channels; ++j) {
      VorbisPCMValue* channel = pcm[j];
      for (uint32_t i = 0; i < uint32_t(frames); ++i) {
        buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
      }
    }

    CheckedInt64 duration = FramesToUsecs(frames, rate);
    if (!duration.isValid()) {
      return MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                         RESULT_DETAIL("Overflow converting audio duration"));
    }
    CheckedInt64 total_duration = FramesToUsecs(mFrames, rate);
    if (!total_duration.isValid()) {
      return MediaResult(
        NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
        RESULT_DETAIL("Overflow converting audio total_duration"));
    }

    CheckedInt64 time = total_duration + aTstampUsecs;
    if (!time.isValid()) {
      return MediaResult(
        NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
        RESULT_DETAIL("Overflow adding total_duration and aTstampUsecs"));
    }

    if (!mAudioConverter) {
      AudioConfig in(AudioConfig::ChannelLayout(channels, VorbisLayout(channels)),
                     rate);
      AudioConfig out(channels, rate);
      if (!in.IsValid() || !out.IsValid()) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_FATAL_ERR,
          RESULT_DETAIL("Invalid channel layout:%u", channels));
      }
      mAudioConverter = MakeUnique<AudioConverter>(in, out);
    }
    MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
    AudioSampleBuffer data(Move(buffer));
    data = mAudioConverter->Process(Move(data));

    aTotalFrames += frames;
    mCallback->Output(new AudioData(aOffset,
                                    time.value(),
                                    duration.value(),
                                    frames,
                                    data.Forget(),
                                    channels,
                                    rate));
    mFrames += frames;
    err = vorbis_synthesis_read(&mVorbisDsp, frames);
    if (err) {
      return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                         RESULT_DETAIL("vorbis_synthesis_read:%d", err));
//......... remainder of the code omitted .........
Developer: MichaelKohler, Project: gecko-dev, Lines: 101, Source: VorbisDecoder.cpp


Note: The CheckedInt64::value method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For redistribution and use, refer to each project's license. Do not reproduce without permission.