本文整理汇总了C++中AudioOutputPtr::playSample方法的典型用法代码示例。如果您正苦于以下问题:C++ AudioOutputPtr::playSample方法的具体用法?C++ AudioOutputPtr::playSample怎么用?C++ AudioOutputPtr::playSample使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类AudioOutputPtr
的用法示例。
在下文中一共展示了AudioOutputPtr::playSample方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: encodeAudioFrame
//......... part of the code is omitted here (truncated by the example site) .........
// NOTE(review): fragment of AudioInput::encodeAudioFrame (appears to be from the
// Mumble client). It classifies the current frame as speech or silence, updates
// the local user's talking state and transmit audio cues, drives the Speex
// preprocessor AGC, and applies the configured idle action.

// Transmit-mode overrides: continuous mode always transmits; push-to-talk
// transmits while the key is held or the double-push window is still open.
if (g.s.atTransmit == Settings::Continuous)
bIsSpeech = true;
else if (g.s.atTransmit == Settings::PushToTalk)
// Double-push "lock": speech while held, or within the configured
// double-push timeout after the last press.
bIsSpeech = g.s.uiDoublePush && ((g.uiDoublePush < g.s.uiDoublePush) || (g.tDoublePush.elapsed() < g.s.uiDoublePush));
// A held push-to-talk key always forces transmission on.
bIsSpeech = bIsSpeech || (g.iPushToTalk > 0);
ClientUser *p = ClientUser::get(g.uiSession);
// Hard mute overrides everything: self-mute, server mute/suppress (ignored in
// local loopback mode), push-to-mute, or an invalid voice target.
if (g.s.bMute || ((g.s.lmLoopMode != Settings::Local) && p && (p->bMute || p->bSuppress)) || g.bPushToMute || (g.iTarget < 0)) {
bIsSpeech = false;
}
if (bIsSpeech) {
iSilentFrames = 0;
} else {
// After more than 500 consecutive silent frames, reset the frame counter.
iSilentFrames++;
if (iSilentFrames > 500)
iFrameCounter = 0;
}
// Reflect the transmit state in the UI: passive, talking, or shouting
// (a non-zero target means a whisper/shout target is selected).
if (p) {
if (! bIsSpeech)
p->setTalking(Settings::Passive);
else if (g.iTarget == 0)
p->setTalking(Settings::Talking);
else
p->setTalking(Settings::Shouting);
}
// Play the on/off transmit cue exactly on speech-state transitions.
if (g.s.bTxAudioCue && g.uiSession != 0) {
AudioOutputPtr ao = g.ao;
if (bIsSpeech && ! bPreviousVoice && ao)
ao->playSample(g.s.qsTxAudioCueOn);
else if (ao && !bIsSpeech && bPreviousVoice)
ao->playSample(g.s.qsTxAudioCueOff);
}
// Fully idle (silent this frame and the previous one): optionally apply the
// idle action, freeze AGC adaptation, and skip encoding entirely.
if (! bIsSpeech && ! bPreviousVoice) {
iBitrate = 0;
// NOTE(review): elapsed()/1000000 compared against iIdleTime suggests
// elapsed() is in microseconds and iIdleTime in seconds — confirm.
if (g.s.iaeIdleAction != Settings::Nothing && ((tIdle.elapsed() / 1000000ULL) > g.s.iIdleTime)) {
if (g.s.iaeIdleAction == Settings::Deafen && !g.s.bDeaf) {
tIdle.restart();
emit doDeaf();
} else if (g.s.iaeIdleAction == Settings::Mute && !g.s.bMute) {
tIdle.restart();
emit doMute();
}
}
// AGC increment 0: do not adapt gain while silent.
spx_int32_t increment = 0;
speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
return;
} else {
// Voice active (or just ended): let the AGC adapt.
spx_int32_t increment = 12;
speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
}
// Speech just started after silence: request an encoder reset.
if (bIsSpeech && !bPreviousVoice) {
bResetEncoder = true;
}
tIdle.restart();
示例2: encodeAudioFrame
//......... part of the code is omitted here (truncated by the example site) .........
// NOTE(review): fragment of AudioInput::encodeAudioFrame (appears to be from the
// Mumble client). It performs voice-activity detection from signal level or
// speech probability, updates talking state and transmit audio cues, handles
// idle-deafen, then encodes the frame with CELT or Speex and queues it.

// RMS of the frame, converted to a dBFS peak level, floored at -96 dB.
float micLevel = sqrtf(sum / static_cast<float>(iFrameSize));
dPeakSignal = qMax(20.0f*log10f(micLevel / 32768.0f), -96.0f);
// Speech probability from the Speex preprocessor, scaled to [0, 1].
spx_int32_t prob = 0;
speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_PROB, &prob);
fSpeechProb = static_cast<float>(prob) / 100.0f;
// clean microphone level: peak of filtered signal attenuated by AGC gain
dPeakCleanMic = qMax(dPeakSignal - gainValue, -96.0f);
// VAD input: speech probability, or amplitude normalized to [0, 1].
float level = (g.s.vsVAD == Settings::SignalToNoise) ? fSpeechProb : (1.0f + dPeakCleanMic / 96.0f);
bool bIsSpeech = false;
// Hysteresis: above fVADmax is always speech; between fVADmin and fVADmax
// only if the previous frame was already speech.
if (level > g.s.fVADmax)
bIsSpeech = true;
else if (level > g.s.fVADmin && bPreviousVoice)
bIsSpeech = true;
// Voice hold: keep transmitting for iVoiceHold frames after speech ends.
if (! bIsSpeech) {
iHoldFrames++;
if (iHoldFrames < g.s.iVoiceHold)
bIsSpeech = true;
} else {
iHoldFrames = 0;
}
// Transmit-mode overrides. NOTE(review): "Continous" is presumably the actual
// (misspelled) enum name in this version of Settings — do not correct it here
// without renaming the enum itself.
if (g.s.atTransmit == Settings::Continous)
bIsSpeech = true;
else if (g.s.atTransmit == Settings::PushToTalk)
// Double-push "lock": speech while held, or within the double-push timeout.
bIsSpeech = g.s.uiDoublePush && ((g.uiDoublePush < g.s.uiDoublePush) || (g.tDoublePush.elapsed() < g.s.uiDoublePush));
// A held push-to-talk key always forces transmission on.
bIsSpeech = bIsSpeech || (g.iPushToTalk > 0);
// Hard mute overrides: self-mute, server mute/suppress (ignored in local
// loopback mode), push-to-mute, or an invalid voice target.
if (g.s.bMute || ((g.s.lmLoopMode != Settings::Local) && p && (p->bMute || p->bSuppress)) || g.bPushToMute || (g.iTarget < 0)) {
bIsSpeech = false;
}
if (bIsSpeech) {
iSilentFrames = 0;
} else {
// After more than 500 consecutive silent frames, reset the frame counter.
iSilentFrames++;
if (iSilentFrames > 500)
iFrameCounter = 0;
}
// Reflect the transmit state in the UI: passive, talking, or shouting
// (a non-zero target means a whisper/shout target is selected).
if (p) {
if (! bIsSpeech)
p->setTalking(Settings::Passive);
else if (g.iTarget == 0)
p->setTalking(Settings::Talking);
else
p->setTalking(Settings::Shouting);
}
// Play the on/off transmit cue exactly on speech-state transitions.
if (g.s.bTxAudioCue && g.uiSession != 0) {
AudioOutputPtr ao = g.ao;
if (bIsSpeech && ! bPreviousVoice && ao)
ao->playSample(g.s.qsTxAudioCueOn);
// Fixed: the original condition tested "ao" twice
// (ao && !bIsSpeech && bPreviousVoice && ao); the duplicate was redundant.
else if (ao && !bIsSpeech && bPreviousVoice)
ao->playSample(g.s.qsTxAudioCueOff);
}
// Fully idle (silent this frame and the previous one): optionally idle-deafen,
// freeze AGC adaptation, and skip encoding entirely.
if (! bIsSpeech && ! bPreviousVoice) {
iBitrate = 0;
// NOTE(review): elapsed()/1000000 compared against iIdleTime suggests
// elapsed() is in microseconds and iIdleTime in seconds — confirm.
if (g.s.iIdleTime && ! g.s.bDeaf && ((tIdle.elapsed() / 1000000ULL) > g.s.iIdleTime)) {
emit doDeaf();
tIdle.restart();
}
// AGC increment 0: do not adapt gain while silent.
spx_int32_t increment = 0;
speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
return;
} else {
// Voice active (or just ended): let the AGC adapt.
spx_int32_t increment = 12;
speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
}
tIdle.restart();
/*
int r = celt_encoder_ctl(ceEncoder, CELT_SET_POST_MDCT_CALLBACK(celtBack, NULL));
qWarning() << "Set Callback" << r;
*/
unsigned char buffer[512];
int len;
// Encode with CELT unless the negotiated message type mandates Speex.
if (umtType != MessageHandler::UDPVoiceSpeex) {
len = encodeCELTFrame(psSource, buffer);
if (len == 0)
return;
} else {
len = encodeSpeexFrame(psSource, buffer);
}
// Queue the encoded frame; the flag marks the end of a speech burst.
flushCheck(QByteArray(reinterpret_cast<const char *>(buffer), len), ! bIsSpeech);
if (! bIsSpeech)
iBitrate = 0;
bPreviousVoice = bIsSpeech;
}