This article rounds up typical usage examples of the C++ method AudioSegment::InsertNullDataAtStart. If you have been wondering what exactly C++'s AudioSegment::InsertNullDataAtStart does and how to use it, the curated code examples below may help. You can also explore further usage examples of its containing class, AudioSegment.
Five code examples of AudioSegment::InsertNullDataAtStart are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
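As the examples suggest, InsertNullDataAtStart prepends a given number of silent (null) frames to a segment. A minimal sketch of the basic pattern follows; the header path, the mozilla namespace qualification, and the helper name AppendTenMsOfSilence are illustrative assumptions, not part of any real API:

#include "AudioSegment.h" // Mozilla media header; exact include path is an assumption

// Hypothetical helper: prepend 10 ms of silence to a segment.
static void
AppendTenMsOfSilence(mozilla::AudioSegment& aSegment, uint32_t aSampleRate)
{
  // aSampleRate frames make up one second, so aSampleRate / 100 frames are 10 ms.
  aSegment.InsertNullDataAtStart(aSampleRate / 100);
}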
Example 1:
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;
  segment.InsertNullDataAtStart(AUDIO_RATE / 100); // 10ms of fake data
  mSource->AppendToTrack(mTrackID, &segment);
  return NS_OK;
}
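Here the default (fake) audio source fills each timer tick with silence: at a sample rate of AUDIO_RATE, AUDIO_RATE / 100 frames correspond to 10 ms, and AppendToTrack delivers the segment to the source's media stream track.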
Example 2:
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;
  segment.Init(CHANNELS);           // set up the segment with CHANNELS channels
  segment.InsertNullDataAtStart(1); // a single silent frame per notification
  mSource->AppendToTrack(mTrackID, &segment);
  return NS_OK;
}
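A variant of the same callback: the segment is explicitly initialized with a channel count, and only one null frame is appended per timer notification.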
Example 3:
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;
  // Notify timer is set every DEFAULT_AUDIO_TIMER_MS milliseconds.
  segment.InsertNullDataAtStart((AUDIO_RATE * MediaEngine::DEFAULT_AUDIO_TIMER_MS) / 1000);
  mSource->AppendToTrack(mTrackID, &segment);
  return NS_OK;
}
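This generalizes Example 1: instead of hard-coding 10 ms, the amount of silence is derived from the timer period, so the source keeps pace however MediaEngine::DEFAULT_AUDIO_TIMER_MS is configured.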
Example 4: UsecsToFrames
static void
SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
                MediaData* aData, AudioSegment* aOutput,
                uint32_t aRate, double aVolume)
{
  MOZ_ASSERT(aData);
  AudioData* audio = aData->As<AudioData>();
  // This logic has to mimic AudioSink closely to make sure we write
  // the exact same silences.
  CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
                                    UsecsToFrames(aStartTime, aRate);
  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);
  if (!audioWrittenOffset.isValid() ||
      !frameOffset.isValid() ||
      // Ignore packets that we've already processed.
      frameOffset.value() + audio->mFrames <= audioWrittenOffset.value()) {
    return;
  }
  if (audioWrittenOffset.value() < frameOffset.value()) {
    int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
    // Write silence to catch up.
    AudioSegment silence;
    silence.InsertNullDataAtStart(silentFrames);
    aStream->mAudioFramesWritten += silentFrames;
    audioWrittenOffset += silentFrames;
    aOutput->AppendFrom(&silence);
  }
  MOZ_ASSERT(audioWrittenOffset.value() >= frameOffset.value());
  int64_t offset = audioWrittenOffset.value() - frameOffset.value();
  size_t framesToWrite = audio->mFrames - offset;
  audio->EnsureAudioBuffer();
  nsRefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
  AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
  nsAutoTArray<const AudioDataValue*, 2> channels;
  for (uint32_t i = 0; i < audio->mChannels; ++i) {
    channels.AppendElement(bufferData + i * audio->mFrames + offset);
  }
  aOutput->AppendFrames(buffer.forget(), channels, framesToWrite);
  aStream->mAudioFramesWritten += framesToWrite;
  aOutput->ApplyVolume(aVolume);
  aStream->mNextAudioTime = audio->GetEndTime();
}
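The silence here serves a different purpose than in the first three examples: when the stream has written fewer frames than the incoming packet's start offset, an AudioSegment filled via InsertNullDataAtStart bridges the gap, keeping the written-frame count in lockstep with what AudioSink would have produced. The remainder of the packet is then appended channel by channel, with CheckedInt64 guarding the offset arithmetic against overflow.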
Example 5: UsecsToFrames
static void
SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
                MediaData* aData, AudioSegment* aOutput, uint32_t aRate,
                const PrincipalHandle& aPrincipalHandle)
{
  // The amount of audio frames that is used to fuzz rounding errors.
  static const int64_t AUDIO_FUZZ_FRAMES = 1;
  MOZ_ASSERT(aData);
  AudioData* audio = aData->As<AudioData>();
  // This logic has to mimic AudioSink closely to make sure we write
  // the exact same silences.
  CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
                                    UsecsToFrames(aStartTime, aRate);
  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);
  if (!audioWrittenOffset.isValid() ||
      !frameOffset.isValid() ||
      // Ignore packets that we've already processed.
      audio->GetEndTime() <= aStream->mNextAudioTime) {
    return;
  }
  if (audioWrittenOffset.value() + AUDIO_FUZZ_FRAMES < frameOffset.value()) {
    int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
    // Write silence to catch up.
    AudioSegment silence;
    silence.InsertNullDataAtStart(silentFrames);
    aStream->mAudioFramesWritten += silentFrames;
    audioWrittenOffset += silentFrames;
    aOutput->AppendFrom(&silence);
  }
  // Always write the whole sample without truncation to be consistent with
  // DecodedAudioDataSink::PlayFromAudioQueue().
  audio->EnsureAudioBuffer();
  RefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
  AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
  AutoTArray<const AudioDataValue*, 2> channels;
  for (uint32_t i = 0; i < audio->mChannels; ++i) {
    channels.AppendElement(bufferData + i * audio->mFrames);
  }
  aOutput->AppendFrames(buffer.forget(), channels, audio->mFrames, aPrincipalHandle);
  aStream->mAudioFramesWritten += audio->mFrames;
  aStream->mNextAudioTime = audio->GetEndTime();
}
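A later revision of Example 4: duplicate packets are now detected by comparing audio->GetEndTime() against mNextAudioTime, silence is only written when the gap exceeds AUDIO_FUZZ_FRAMES of rounding slack, and the whole sample is appended without truncation to stay consistent with DecodedAudioDataSink::PlayFromAudioQueue(). AppendFrames additionally takes a PrincipalHandle, which tags the frames with their originating principal.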