This article collects typical usage examples of the C++ method AudioSegment::AppendFrames. If you are unsure exactly how AudioSegment::AppendFrames is used in practice, the curated examples below should help; you can also look further into the containing class, AudioSegment, for related usage.
The following presents 11 code examples of the AudioSegment::AppendFrames method, sorted by popularity by default.
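Before the individual examples, here is a minimal sketch of the pattern most of them share: allocate a SharedBuffer, fill it with 16-bit samples, collect one pointer per channel, append the frames to an AudioSegment, and hand the segment to a track. The helper name AppendSilence and its aStream/aTrackID/aSamples parameters are illustrative placeholders rather than code taken from any one example; the types involved (SharedBuffer, AudioSegment, SourceMediaStream) come from the Mozilla media stack, and newer Gecko revisions add a PrincipalHandle argument to AppendFrames, as Example 2 below shows.
void
AppendSilence(SourceMediaStream* aStream, TrackID aTrackID, int32_t aSamples)
{
  // Allocate a reference-counted buffer big enough for aSamples 16-bit frames.
  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aSamples * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());
  memset(dest, 0, aSamples * sizeof(int16_t)); // placeholder data: silence

  // One pointer per channel; mono here, so a single entry pointing into the buffer.
  AutoTArray<const int16_t*, 1> channels;
  channels.AppendElement(dest);

  // The segment takes ownership of the buffer via buffer.forget().
  AudioSegment segment;
  segment.AppendFrames(buffer.forget(), channels, aSamples);

  // Hand the freshly built segment to the track.
  aStream->AppendToTrack(aTrackID, &segment);
}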
Example 1: MediaEngineWebRTCAudioSource::Process
void
MediaEngineWebRTCAudioSource::Process(const int channel,
                                      const webrtc::ProcessingTypes type, sample* audio10ms,
                                      const int length, const int samplingFreq, const bool isStereo)
{
  ReentrantMonitorAutoEnter enter(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*, 1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed
      source->AppendToTrack(mTrackID, &segment);
    }
  }
  return;
}
Example 2: Generate
void Generate(AudioSegment& aSegment, const int32_t& aSamples)
{
  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aSamples * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());
  mGenerator.generate(dest, aSamples);

  // Every channel entry points at the same generated data.
  AutoTArray<const int16_t*, 1> channels;
  for (int32_t i = 0; i < mChannels; i++) {
    channels.AppendElement(dest);
  }
  aSegment.AppendFrames(buffer.forget(), channels, aSamples, PRINCIPAL_HANDLE_NONE);
}
Example 3: MediaEngineDefaultAudioSource::AppendToSegment
void
MediaEngineDefaultAudioSource::AppendToSegment(AudioSegment& aSegment,
                                               TrackTicks aSamples)
{
  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aSamples * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());
  mSineGenerator->generate(dest, aSamples);

  AutoTArray<const int16_t*, 1> channels;
  channels.AppendElement(dest);
  aSegment.AppendFrames(buffer.forget(), channels, aSamples);
}
Example 4: MediaEngineDefaultAudioSource::Notify
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;
  nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(AUDIO_FRAME_LENGTH * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());
  mSineGenerator->generate(dest, AUDIO_FRAME_LENGTH);

  nsAutoTArray<const int16_t*, 1> channels;
  channels.AppendElement(dest);
  segment.AppendFrames(buffer.forget(), channels, AUDIO_FRAME_LENGTH);

  mSource->AppendToTrack(mTrackID, &segment);
  return NS_OK;
}
Example 5: nsSpeechTask::SendAudioImpl
void
nsSpeechTask::SendAudioImpl(RefPtr<mozilla::SharedBuffer>& aSamples, uint32_t aDataLen)
{
  if (aDataLen == 0) {
    mStream->EndAllTrackAndFinish();
    return;
  }

  AudioSegment segment;
  nsAutoTArray<const int16_t*, 1> channelData;
  channelData.AppendElement(static_cast<int16_t*>(aSamples->Data()));
  segment.AppendFrames(aSamples.forget(), channelData, aDataLen);
  mStream->AppendToTrack(1, &segment);
  mStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
}
Example 6: SpeechRecognition::CreateAudioSegment
AudioSegment*
SpeechRecognition::CreateAudioSegment(nsTArray<nsRefPtr<SharedBuffer>>& aChunks)
{
  AudioSegment* segment = new AudioSegment();
  for (uint32_t i = 0; i < aChunks.Length(); ++i) {
    nsRefPtr<SharedBuffer> buffer = aChunks[i];
    const int16_t* chunkData = static_cast<const int16_t*>(buffer->Data());

    nsAutoTArray<const int16_t*, 1> channels;
    channels.AppendElement(chunkData);
    segment->AppendFrames(buffer.forget(), channels, mAudioSamplesPerChunk);
  }
  return segment;
}
Example 7: MediaEngineWebRTCAudioSource::Process (with latency logging)
void
MediaEngineWebRTCAudioSource::Process(int channel,
                                      webrtc::ProcessingTypes type, sample* audio10ms,
                                      int length, int samplingFreq, bool isStereo)
{
  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*, 1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);
    TimeStamp insertTime;
    segment.GetStartTime(insertTime);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Make sure we include the stream and the track.
      // The 0:1 is a flag to note when we've done the final insert for a given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion, LATENCY_STREAM_ID(source, mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);
      source->AppendToTrack(mTrackID, &segment);
    }
  }
  return;
}
Example 8: MediaEngineWebRTCAudioSource::Process (five-argument AppendFrames overload)
void
MediaEngineWebRTCAudioSource::Process(const int channel,
                                      const webrtc::ProcessingTypes type, sample* audio10ms,
                                      const int length, const int samplingFreq, const bool isStereo)
{
  ReentrantMonitorAutoEnter enter(mMonitor);
  if (mState != kStarted)
    return;

  nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
  sample* dest = static_cast<sample*>(buffer->Data());
  memcpy(dest, audio10ms, length * sizeof(sample));

  AudioSegment segment;
  segment.Init(CHANNELS);
  // This AppendFrames overload takes the buffer length, a start/end sample range,
  // and the sample format explicitly.
  segment.AppendFrames(buffer.forget(), length, 0, length, AUDIO_FORMAT_S16);
  mSource->AppendToTrack(mTrackID, &segment);
  return;
}
Example 9: MediaEngineDefaultAudioSource::Notify (with fake tracks)
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;
  nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(AUDIO_FRAME_LENGTH * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());
  mSineGenerator->generate(dest, AUDIO_FRAME_LENGTH);

  nsAutoTArray<const int16_t*, 1> channels;
  channels.AppendElement(dest);
  segment.AppendFrames(buffer.forget(), channels, AUDIO_FRAME_LENGTH);
  mSource->AppendToTrack(mTrackID, &segment);

  // Generate null data for fake tracks.
  if (mHasFakeTracks) {
    for (int i = 0; i < kFakeAudioTrackCount; ++i) {
      AudioSegment nullSegment;
      nullSegment.AppendNullData(AUDIO_FRAME_LENGTH);
      mSource->AppendToTrack(kTrackCount + kFakeVideoTrackCount + i, &nullSegment);
    }
  }
  return NS_OK;
}
Example 10: nsSpeechTask::SendAudioImpl (raw sample input)
void
nsSpeechTask::SendAudioImpl(int16_t* aData, uint32_t aDataLen)
{
  if (aDataLen == 0) {
    mStream->EndAllTrackAndFinish();
    return;
  }

  nsRefPtr<mozilla::SharedBuffer> samples =
    SharedBuffer::Create(aDataLen * sizeof(int16_t));
  int16_t* frames = static_cast<int16_t*>(samples->Data());
  for (uint32_t i = 0; i < aDataLen; i++) {
    frames[i] = aData[i];
  }

  AudioSegment segment;
  nsAutoTArray<const int16_t*, 1> channelData;
  channelData.AppendElement(frames);
  segment.AppendFrames(samples.forget(), channelData, aDataLen);
  mStream->AppendToTrack(1, &segment);
  mStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
}
Example 11: MediaEngineWebRTCAudioSource::Process (with far-end AEC handling)
void
MediaEngineWebRTCAudioSource::Process(int channel,
                                      webrtc::ProcessingTypes type, sample* audio10ms,
                                      int length, int samplingFreq, bool isStereo)
{
  // On initial capture, throw away all far-end data except the most recent sample
  // since it's already irrelevant and we want to avoid confusing the AEC far-end
  // input code with "old" audio.
  if (!mStarted) {
    mStarted = true;
    while (gFarendObserver->Size() > 1) {
      FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
      free(buffer);
    }
  }

  while (gFarendObserver->Size() > 0) {
    FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
    if (buffer) {
      int length = buffer->mSamples;
      if (mVoERender->ExternalPlayoutData(buffer->mData,
                                          gFarendObserver->PlayoutFrequency(),
                                          gFarendObserver->PlayoutChannels(),
                                          mPlayoutDelay,
                                          length) == -1) {
        return;
      }
    }
    free(buffer);
  }

#ifdef PR_LOGGING
  mSamples += length;
  if (mSamples > samplingFreq) {
    mSamples %= samplingFreq; // just in case mSamples >> samplingFreq
    if (PR_LOG_TEST(GetMediaManagerLog(), PR_LOG_DEBUG)) {
      webrtc::EchoStatistics echo;
      mVoECallReport->GetEchoMetricSummary(echo);
#define DUMP_STATVAL(x) (x).min, (x).max, (x).average
      LOG(("Echo: ERL: %d/%d/%d, ERLE: %d/%d/%d, RERL: %d/%d/%d, NLP: %d/%d/%d",
           DUMP_STATVAL(echo.erl),
           DUMP_STATVAL(echo.erle),
           DUMP_STATVAL(echo.rerl),
           DUMP_STATVAL(echo.a_nlp)));
    }
  }
#endif

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*, 1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);
    TimeStamp insertTime;
    segment.GetStartTime(insertTime);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Make sure we include the stream and the track.
      // The 0:1 is a flag to note when we've done the final insert for a given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion, LATENCY_STREAM_ID(source, mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);
      source->AppendToTrack(mTrackID, &segment);
    }
  }
  return;
}