This article collects typical usage examples of the C++ AudioSegment class. If you have been wondering what AudioSegment is for, how to use it, or where to find real-world examples, the curated class examples below may help.
The following presents 15 code examples of the AudioSegment class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
Example 1: enter
void
MediaEngineWebRTCAudioSource::Process(const int channel,
                                      const webrtc::ProcessingTypes type,
                                      sample* audio10ms, const int length,
                                      const int samplingFreq, const bool isStereo)
{
  ReentrantMonitorAutoEnter enter(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*, 1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);

    SourceMediaStream* source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      source->AppendToTrack(mTrackID, &segment);
    }
  }
}
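The pattern above (copy the raw samples into a SharedBuffer, point a channel array at it, wrap it in an AudioSegment, append to the track) recurs throughout these examples. A minimal sketch of that pattern in isolation, assuming mono 16-bit samples and only the Gecko APIs already used above (AppendMono10ms is a hypothetical helper name, not part of the original source):

static void
AppendMono10ms(SourceMediaStream* aSource, TrackID aTrack,
               const int16_t* aSamples, int aLength)
{
  // One SharedBuffer per append; the segment takes ownership via forget().
  nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(aLength * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());
  memcpy(dest, aSamples, aLength * sizeof(int16_t));

  AudioSegment segment;
  nsAutoTArray<const int16_t*, 1> channels;
  channels.AppendElement(dest);
  segment.AppendFrames(buffer.forget(), channels, aLength);
  aSource->AppendToTrack(aTrack, &segment);
}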
Example 2: RateConvertTicksRoundUp
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  TimeStamp now = TimeStamp::Now();
  TimeDuration timeSinceLastNotify = now - mLastNotify;
  mLastNotify = now;
  TrackTicks samplesSinceLastNotify =
    RateConvertTicksRoundUp(AUDIO_RATE, 1000000, timeSinceLastNotify.ToMicroseconds());

  // If more time has passed since the last Notify() than mBufferSize covers,
  // we have underrun and the MSG had to append silence while waiting for us
  // to push more data. In that case we cap the append at mBufferSize again.
  TrackTicks samplesToAppend = std::min(samplesSinceLastNotify, mBufferSize);

  AudioSegment segment;
  AppendToSegment(segment, samplesToAppend);
  mSource->AppendToTrack(mTrackID, &segment);

  // Generate null data for fake tracks.
  if (mHasFakeTracks) {
    for (int i = 0; i < kFakeAudioTrackCount; ++i) {
      AudioSegment nullSegment;
      nullSegment.AppendNullData(samplesToAppend);
      mSource->AppendToTrack(kTrackCount + kFakeVideoTrackCount + i, &nullSegment);
    }
  }
  return NS_OK;
}
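For illustration, assuming AUDIO_RATE is 44100 Hz (the constant's value is not shown in this snippet): if 25000 µs elapsed since the last notify, RateConvertTicksRoundUp yields ceil(44100 × 25000 / 1000000) = 1103 ticks, which std::min then caps at mBufferSize so a long stall cannot trigger an oversized append.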
Example 3: do_CreateInstance
nsresult
MediaEngineDefaultAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
{
  if (mState != kAllocated) {
    return NS_ERROR_FAILURE;
  }
  mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
  if (!mTimer) {
    return NS_ERROR_FAILURE;
  }
  mSource = aStream;

  // AddTrack will take ownership of segment.
  AudioSegment* segment = new AudioSegment();
  segment->Init(CHANNELS);
  mSource->AddTrack(aID, RATE, 0, segment);

  // We aren't going to add any more tracks.
  mSource->AdvanceKnownTracksTime(STREAM_TIME_MAX);

  // Remember the TrackID so we can finish later.
  mTrackID = aID;

  // 1 audio frame per video frame.
  mTimer->InitWithCallback(this, 1000 / FPS, nsITimer::TYPE_REPEATING_SLACK);
  mState = kStarted;
  return NS_OK;
}
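The 1000 / FPS period ties the audio push rate to the video frame rate, per the comment above. With FPS defined as 30 (an assumed value; the constant is not shown here), the timer would fire roughly every 33 ms.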
Example 4:
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;
  segment.InsertNullDataAtStart(AUDIO_RATE / 100); // 10ms of fake data
  mSource->AppendToTrack(mTrackID, &segment);
  return NS_OK;
}
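AUDIO_RATE / 100 is simply the number of frames in 10 ms, one hundredth of a second of audio. At an assumed AUDIO_RATE of 44100 Hz, that is 441 silent frames per timer tick.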
Example 5: do_CreateInstance
nsresult
MediaEngineDefaultAudioSource::Start(SourceMediaStream* aStream, TrackID aID,
                                     const PrincipalHandle& aPrincipalHandle)
{
  if (mState != kAllocated) {
    return NS_ERROR_FAILURE;
  }
  mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
  if (!mTimer) {
    return NS_ERROR_FAILURE;
  }
  mSource = aStream;

  // We try to keep the appended data at this size.
  // Make it two timer intervals to try to avoid underruns.
  mBufferSize = 2 * (AUDIO_RATE * DEFAULT_AUDIO_TIMER_MS) / 1000;

  // AddAudioTrack will take ownership of segment.
  AudioSegment* segment = new AudioSegment();
  AppendToSegment(*segment, mBufferSize);
  mSource->AddAudioTrack(aID, AUDIO_RATE, 0, segment,
                         SourceMediaStream::ADDTRACK_QUEUED);
  if (mHasFakeTracks) {
    for (int i = 0; i < kFakeAudioTrackCount; ++i) {
      segment = new AudioSegment();
      segment->AppendNullData(mBufferSize);
      mSource->AddAudioTrack(kTrackCount + kFakeVideoTrackCount + i,
                             AUDIO_RATE, 0, segment,
                             SourceMediaStream::ADDTRACK_QUEUED);
    }
  }

  // Remember the TrackID so we can finish later.
  mTrackID = aID;
  // Remember the PrincipalHandle since we don't append in NotifyPull.
  mPrincipalHandle = aPrincipalHandle;
  mLastNotify = TimeStamp::Now();

  // 1 audio frame per 10ms.
#if defined(MOZ_WIDGET_GONK) && defined(DEBUG)
  // The B2G emulator debug build is very slow and has problems dealing with
  // real-time audio inputs.
  mTimer->InitWithCallback(this, DEFAULT_AUDIO_TIMER_MS * 10,
                           nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP);
#else
  mTimer->InitWithCallback(this, DEFAULT_AUDIO_TIMER_MS,
                           nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP);
#endif
  mState = kStarted;
  return NS_OK;
}
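Plugging in assumed values of AUDIO_RATE = 44100 and DEFAULT_AUDIO_TIMER_MS = 10: mBufferSize = 2 × (44100 × 10) / 1000 = 882 frames, or 20 ms of audio, giving the stream two timer intervals of headroom before an underrun.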
Example 6:
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;
  segment.Init(CHANNELS);
  segment.InsertNullDataAtStart(1);
  mSource->AppendToTrack(mTrackID, &segment);
  return NS_OK;
}
Example 7:
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;
  // The notify timer fires every DEFAULT_AUDIO_TIMER_MS milliseconds.
  segment.InsertNullDataAtStart((AUDIO_RATE * MediaEngine::DEFAULT_AUDIO_TIMER_MS) / 1000);
  mSource->AppendToTrack(mTrackID, &segment);
  return NS_OK;
}
Example 8: EnsureTrack
void
AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                 uint32_t aFlags)
{
  uint32_t inputCount = mInputs.Length();
  StreamBuffer::Track* track = EnsureTrack(mTrackId);

  // Notify the DOM that everything is in order.
  if (!mTrackCreated) {
    for (uint32_t i = 0; i < mListeners.Length(); i++) {
      MediaStreamListener* l = mListeners[i];
      AudioSegment tmp;
      l->NotifyQueuedTrackChanges(
        Graph(), mTrackId, 0, MediaStreamListener::TRACK_EVENT_CREATED, tmp);
      l->NotifyFinishedTrackCreation(Graph());
    }
    mTrackCreated = true;
  }

  // If the captured stream is connected back to an object on the page (be it
  // an HTMLMediaElement with a stream as source, or an AudioContext), a cycle
  // can occur. This can work if it's an AudioContext with at least one
  // DelayNode, but the MSG will mute the whole cycle otherwise.
  if (mFinished || InMutedCycle() || inputCount == 0) {
    track->Get<AudioSegment>()->AppendNullData(aTo - aFrom);
  } else {
    // We mix down all the tracks of all inputs to a stereo track. Everything
    // is {up,down}-mixed to stereo.
    mMixer.StartMixing();
    AudioSegment output;
    for (uint32_t i = 0; i < inputCount; i++) {
      MediaStream* s = mInputs[i]->GetSource();
      StreamBuffer::TrackIter tracks(s->GetStreamBuffer(), MediaSegment::AUDIO);
      while (!tracks.IsEnded()) {
        AudioSegment* inputSegment = tracks->Get<AudioSegment>();
        StreamTime inputStart = s->GraphTimeToStreamTimeWithBlocking(aFrom);
        StreamTime inputEnd = s->GraphTimeToStreamTimeWithBlocking(aTo);
        AudioSegment toMix;
        toMix.AppendSlice(*inputSegment, inputStart, inputEnd);
        // Care for streams blocked in the [aFrom, aTo] range.
        if (inputEnd - inputStart < aTo - aFrom) {
          toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart));
        }
        toMix.Mix(mMixer, MONO, Graph()->GraphRate());
        tracks.Next();
      }
    }
    // This calls MixerCallback below.
    mMixer.FinishMixing();
  }

  // Regardless of the status of the input tracks, we go forward.
  mBuffer.AdvanceKnownTracksTime(GraphTimeToStreamTimeWithBlocking(aTo));
}
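The slice-pad-mix step in the inner loop is the reusable core here. A minimal sketch of that step in isolation, assuming the same AudioSegment and AudioMixer APIs used above (MixSlice is a hypothetical helper, not part of the original source):

static void
MixSlice(AudioMixer& aMixer, const AudioSegment& aInput,
         StreamTime aStart, StreamTime aEnd, StreamTime aWanted,
         uint32_t aSampleRate)
{
  AudioSegment toMix;
  toMix.AppendSlice(aInput, aStart, aEnd);
  // If the input was blocked for part of the interval, pad with silence so
  // every contribution to the mixer covers the same number of frames.
  if (aEnd - aStart < aWanted) {
    toMix.AppendNullData(aWanted - (aEnd - aStart));
  }
  toMix.Mix(aMixer, MONO, aSampleRate);
}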
Example 9: PROFILER_LABEL
nsresult
OmxAudioTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  PROFILER_LABEL("OmxAACAudioTrackEncoder", "GetEncodedTrack",
                 js::ProfileEntry::Category::OTHER);
  AudioSegment segment;
  // Move all the samples from mRawSegment to segment. We only hold
  // the monitor in this block.
  {
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);
    // Wait until the encoder is initialized or encoding is canceled.
    while (!mInitialized && !mCanceled) {
      mReentrantMonitor.Wait();
    }
    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }
    segment.AppendFrom(&mRawSegment);
  }

  nsresult rv;
  if (segment.GetDuration() == 0) {
    // Notify EOS at least once, even if the segment is empty.
    if (mEndOfStream && !mEosSetInEncoder) {
      mEosSetInEncoder = true;
      rv = mEncoder->Encode(segment, OMXCodecWrapper::BUFFER_EOS);
      NS_ENSURE_SUCCESS(rv, rv);
    }
    // Nothing to encode, but the encoder could still have encoded data for
    // earlier input.
    return AppendEncodedFrames(aData);
  }

  // The OMX encoder has only a limited number of input buffers, so we have to
  // feed input and fetch output more than once if too many samples are
  // pending in the segment.
  while (segment.GetDuration() > 0) {
    rv = mEncoder->Encode(segment,
                          mEndOfStream ? OMXCodecWrapper::BUFFER_EOS : 0);
    NS_ENSURE_SUCCESS(rv, rv);
    rv = AppendEncodedFrames(aData);
    NS_ENSURE_SUCCESS(rv, rv);
  }
  return NS_OK;
}
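For the drain loop to terminate, Encode() must remove the frames it consumes from segment so that GetDuration() shrinks on each pass; calling AppendEncodedFrames(aData) after every Encode() then collects output before the next batch of input is fed. This reading is inferred from the loop condition; the OMXCodecWrapper internals are not shown here.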
Example 10:
void
nsSpeechTask::SendAudioImpl(RefPtr<mozilla::SharedBuffer>& aSamples, uint32_t aDataLen)
{
  if (aDataLen == 0) {
    mStream->EndAllTrackAndFinish();
    return;
  }

  AudioSegment segment;
  nsAutoTArray<const int16_t*, 1> channelData;
  channelData.AppendElement(static_cast<int16_t*>(aSamples->Data()));
  segment.AppendFrames(aSamples.forget(), channelData, aDataLen);
  mStream->AppendToTrack(1, &segment);
  mStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
}
Example 11: AudioSegment
AudioSegment*
SpeechRecognition::CreateAudioSegment(nsTArray<nsRefPtr<SharedBuffer>>& aChunks)
{
  AudioSegment* segment = new AudioSegment();
  for (uint32_t i = 0; i < aChunks.Length(); ++i) {
    nsRefPtr<SharedBuffer> buffer = aChunks[i];
    const int16_t* chunkData = static_cast<const int16_t*>(buffer->Data());
    nsAutoTArray<const int16_t*, 1> channels;
    channels.AppendElement(chunkData);
    segment->AppendFrames(buffer.forget(), channels, mAudioSamplesPerChunk);
  }
  return segment;
}
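Note the reference counting here: each iteration copies aChunks[i] into the local buffer (adding a reference), and buffer.forget() then hands that reference to the segment. The segment therefore shares ownership of each SharedBuffer with aChunks instead of copying the sample data.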
Example 12: sizeof
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;
  nsRefPtr<SharedBuffer> buffer =
    SharedBuffer::Create(AUDIO_FRAME_LENGTH * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());
  mSineGenerator->generate(dest, AUDIO_FRAME_LENGTH);

  nsAutoTArray<const int16_t*, 1> channels;
  channels.AppendElement(dest);
  segment.AppendFrames(buffer.forget(), channels, AUDIO_FRAME_LENGTH);
  mSource->AppendToTrack(mTrackID, &segment);
  return NS_OK;
}
Example 13: SendStreamAudio
static void
SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
                MediaData* aData, AudioSegment* aOutput,
                uint32_t aRate, double aVolume)
{
  MOZ_ASSERT(aData);
  AudioData* audio = aData->As<AudioData>();
  // This logic has to mimic AudioSink closely to make sure we write
  // the exact same silences.
  CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
                                    UsecsToFrames(aStartTime, aRate);
  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);

  if (!audioWrittenOffset.isValid() ||
      !frameOffset.isValid() ||
      // Ignore packets that we've already processed.
      frameOffset.value() + audio->mFrames <= audioWrittenOffset.value()) {
    return;
  }

  if (audioWrittenOffset.value() < frameOffset.value()) {
    int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
    // Write silence to catch up.
    AudioSegment silence;
    silence.InsertNullDataAtStart(silentFrames);
    aStream->mAudioFramesWritten += silentFrames;
    audioWrittenOffset += silentFrames;
    aOutput->AppendFrom(&silence);
  }

  MOZ_ASSERT(audioWrittenOffset.value() >= frameOffset.value());

  int64_t offset = audioWrittenOffset.value() - frameOffset.value();
  size_t framesToWrite = audio->mFrames - offset;

  audio->EnsureAudioBuffer();
  nsRefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
  AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
  nsAutoTArray<const AudioDataValue*, 2> channels;
  for (uint32_t i = 0; i < audio->mChannels; ++i) {
    channels.AppendElement(bufferData + i * audio->mFrames + offset);
  }
  aOutput->AppendFrames(buffer.forget(), channels, framesToWrite);
  aStream->mAudioFramesWritten += framesToWrite;
  aOutput->ApplyVolume(aVolume);

  aStream->mNextAudioTime = audio->GetEndTime();
}
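A worked example of the catch-up branch, assuming aRate = 48000: if 48000 frames have been written so far but the packet's frameOffset is 48480, the code first appends 480 frames (10 ms) of silence; offset is then 0 and the whole packet is written. Conversely, when the packet starts before audioWrittenOffset, a positive offset advances each channel pointer so only the unwritten tail of the packet is appended.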
Example 14: SendStreamAudio
static void
SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
                MediaData* aData, AudioSegment* aOutput, uint32_t aRate,
                const PrincipalHandle& aPrincipalHandle)
{
  // The number of audio frames used to fuzz rounding errors.
  static const int64_t AUDIO_FUZZ_FRAMES = 1;

  MOZ_ASSERT(aData);
  AudioData* audio = aData->As<AudioData>();
  // This logic has to mimic AudioSink closely to make sure we write
  // the exact same silences.
  CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
                                    UsecsToFrames(aStartTime, aRate);
  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);

  if (!audioWrittenOffset.isValid() ||
      !frameOffset.isValid() ||
      // Ignore packets that we've already processed.
      audio->GetEndTime() <= aStream->mNextAudioTime) {
    return;
  }

  if (audioWrittenOffset.value() + AUDIO_FUZZ_FRAMES < frameOffset.value()) {
    int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
    // Write silence to catch up.
    AudioSegment silence;
    silence.InsertNullDataAtStart(silentFrames);
    aStream->mAudioFramesWritten += silentFrames;
    audioWrittenOffset += silentFrames;
    aOutput->AppendFrom(&silence);
  }

  // Always write the whole sample without truncation, to be consistent with
  // DecodedAudioDataSink::PlayFromAudioQueue().
  audio->EnsureAudioBuffer();
  RefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
  AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
  AutoTArray<const AudioDataValue*, 2> channels;
  for (uint32_t i = 0; i < audio->mChannels; ++i) {
    channels.AppendElement(bufferData + i * audio->mFrames);
  }
  aOutput->AppendFrames(buffer.forget(), channels, audio->mFrames,
                        aPrincipalHandle);
  aStream->mAudioFramesWritten += audio->mFrames;
  aStream->mNextAudioTime = audio->GetEndTime();
}
Example 15: printf
// Loop back audio through a media stream.
nsresult
MediaEngineWebrtcAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
{
  const int DEFAULT_PORT = 55555;
  printf("\n MediaEngineWebrtcAudioSource : Start: Entered ");

  if (!mInitDone || mState != kAllocated) {
    return NS_ERROR_FAILURE;
  }
  if (!aStream) {
    return NS_ERROR_FAILURE;
  }
  mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
  if (!mTimer) {
    return NS_ERROR_FAILURE;
  }

  mSource = aStream;
  AudioSegment* segment = new AudioSegment();
  segment->Init(CHANNELS);
  //segment->InsertNullDataAtStart(1);
  mSource->AddTrack(aID, PLAYOUT_SAMPLE_FREQUENCY, 0, segment);
  mSource->AdvanceKnownTracksTime(STREAM_TIME_MAX);
  mTrackID = aID;

  printf("\n Starting the audio engine ");
  mVoEBase->SetLocalReceiver(mChannel, DEFAULT_PORT);
  mVoEBase->SetSendDestination(mChannel, DEFAULT_PORT, "127.0.0.1");
  if (-1 == mVoEXmedia->SetExternalPlayoutStatus(true)) {
    printf("\n SetExternalPlayoutStatus failed %d ", mVoEBase->LastError());
    return NS_ERROR_FAILURE;
  }

  // Loop back the audio.
  mVoEBase->StartPlayout(mChannel);
  mVoEBase->StartSend(mChannel);
  mVoEBase->StartReceive(mChannel);
  mState = kStarted;

  // Call Notify() every 10 milliseconds.
  mTimer->InitWithCallback(this, 10, nsITimer::TYPE_REPEATING_SLACK);
  return NS_OK;
}