This article collects typical usage examples of the C++ AudioOutputPtr class. If you have been wondering what AudioOutputPtr is for and how to use it, the hand-picked class examples below may help.
The following 14 code examples of the AudioOutputPtr class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
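For context, these snippets come from Mumble-style voice-chat code, where AudioOutputPtr is a reference-counted smart pointer to the global audio output object. The declaration itself is not part of the examples; here is a minimal sketch of the assumed setup (the typedef, the Global struct, and its member names are guesses based on the usage of g.ao below):

#include <boost/shared_ptr.hpp>

class AudioOutput; // mixes per-user audio buffers and plays samples

// Assumed typedef: every example first copies g.ao into a local
// AudioOutputPtr, which only makes sense for a shared_ptr: the local
// copy keeps the object alive even if another thread resets g.ao.
typedef boost::shared_ptr<AudioOutput> AudioOutputPtr;

struct Global {
    AudioOutputPtr ao; // global audio output; may be reset at any time
};
extern Global g;

That copy-then-use pattern recurs in every example below: take a local AudioOutputPtr, check it (or dynamic_cast it to a concrete backend), and only then touch the object.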
Example 1: lock
void JackAudioSystem::initializeOutput() {
QMutexLocker lock(&qmWait);
if (!jasys->bJackIsGood) {
return;
}
AudioOutputPtr ao = g.ao;
JackAudioOutput * const jao = dynamic_cast<JackAudioOutput *>(ao.get());
allocOutputBuffer(iBufferSize);
if (jao) {
jao->qmMutex.lock();
}
for (unsigned int i = 0; i < iOutPorts; ++i) {
char name[10];
snprintf(name, sizeof(name), "output_%u", i + 1);
out_ports[i] = jack_port_register(client, name, JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0);
if (out_ports[i] == NULL) {
qWarning("JackAudioSystem: unable to register 'output' port");
break;
}
}
bOutputIsGood = true;
if (jao) {
jao->qmMutex.unlock();
}
}
Example 2: destroyOutput
void JackAudioSystem::destroyOutput() {
AudioOutputPtr ao = g.ao;
JackAudioOutput * const jao = dynamic_cast<JackAudioOutput *>(ao.get());
if (jao) {
jao->qmMutex.lock();
}
delete [] output_buffer;
output_buffer = NULL;
for (unsigned int i = 0; i < iOutPorts; ++i) {
if (out_ports[i] != NULL) {
int err = jack_port_unregister(client, out_ports[i]);
if (err != 0) {
qWarning("JackAudioSystem: unable to unregister out port - jack_port_unregister() returned %i", err);
}
out_ports[i] = NULL;
}
}
bOutputIsGood = false;
if (jao) {
jao->qmMutex.unlock();
}
}
Example 3: qWarning
void LoopPlayer::addFrame(const QByteArray &packet, int seq) {
if (DOUBLE_RAND < g.s.dPacketLoss) {
qWarning("Drop");
return;
}
bool restart = (qtLastFetch.elapsed() > 100);
{
QMutexLocker l(&qmLock);
double time = qtTicker.elapsed();
double r;
if (restart)
r = 0.0;
else
r = DOUBLE_RAND * g.s.dMaxPacketDelay;
qmPackets.insert(static_cast<float>(time + r), Packet(seq, packet));
}
// Restart check
if (qtLastFetch.elapsed() > 100) {
AudioOutputPtr ao = g.ao;
if (ao) {
ao->addFrameToBuffer(this, QByteArray(), 0);
}
}
}
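DOUBLE_RAND is not defined in the excerpt, but the usage implies a uniform random value in [0, 1]. A self-contained sketch of the same loss-and-jitter simulation, with the macro definition assumed and the Packet layout taken from the example:

#include <cstdlib>
#include <map>
#include <string>
#include <utility>

// Assumed definition: uniform double in [0, 1].
#define DOUBLE_RAND (rand() / static_cast<double>(RAND_MAX))

typedef std::pair<int, std::string> Packet; // (sequence number, payload)

// Drop a fraction of packets and delay the rest by a random amount,
// mirroring LoopPlayer::addFrame above; the multimap keys packets by
// their scheduled delivery time.
void queueFrame(std::multimap<float, Packet> &packets, double now,
                int seq, const std::string &payload,
                double lossRate, double maxDelay) {
    if (DOUBLE_RAND < lossRate)
        return; // simulated packet loss
    double jitter = DOUBLE_RAND * maxDelay; // simulated network delay
    packets.insert(std::make_pair(static_cast<float>(now + jitter),
                                  Packet(seq, payload)));
}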
Example 4: lock
void ClientUser::remove(unsigned int uiSession) {
QWriteLocker lock(&c_qrwlUsers);
ClientUser *p = c_qmUsers.take(uiSession);
if (p && p->cChannel)
p->cChannel->removeUser(p);
if (p) {
AudioOutputPtr ao = g.ao;
if (ao)
ao->removeBuffer(p);
}
}
Example 5: process_callback
int JackAudioSystem::process_callback(jack_nframes_t nframes, void *arg) {
JackAudioSystem * const jas = static_cast<JackAudioSystem*>(arg);
if (jas && jas->bJackIsGood) {
AudioInputPtr ai = g.ai;
AudioOutputPtr ao = g.ao;
JackAudioInput * const jai = dynamic_cast<JackAudioInput *>(ai.get());
JackAudioOutput * const jao = dynamic_cast<JackAudioOutput *>(ao.get());
if (jai && jai->isRunning() && jai->iMicChannels > 0 && !jai->isFinished()) {
QMutexLocker locker(&jai->qmMutex); // must be named: an unnamed temporary would unlock immediately
void *input = jack_port_get_buffer(jas->in_port, nframes);
if (input != NULL) {
jai->addMic(input, nframes);
}
}
if (jao && jao->isRunning() && jao->iChannels > 0 && !jao->isFinished()) {
QMutexLocker locker(&jao->qmMutex); // must be named: an unnamed temporary would unlock immediately
jack_default_audio_sample_t *port_buffers[JACK_MAX_OUTPUT_PORTS];
for (unsigned int i = 0; i < jao->iChannels; ++i) {
port_buffers[i] = (jack_default_audio_sample_t*)jack_port_get_buffer(jas->out_ports[i], nframes);
if (port_buffers[i] == NULL) {
return 1;
}
}
jack_default_audio_sample_t * const buffer = jas->output_buffer;
memset(buffer, 0, sizeof(jack_default_audio_sample_t) * nframes * jao->iChannels);
jao->mix(buffer, nframes);
if (jao->iChannels == 1) {
memcpy(port_buffers[0], buffer, sizeof(jack_default_audio_sample_t) * nframes);
} else {
// de-interleave channels
for (unsigned int i = 0; i < nframes * jao->iChannels; ++i) {
port_buffers[i % jao->iChannels][i / jao->iChannels] = buffer[i];
}
}
}
}
return 0;
}
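The mix() call above produces interleaved samples (all channels of frame 0, then frame 1, and so on), while each JACK port expects one contiguous buffer per channel. A standalone sketch of the de-interleave loop used in the callback:

#include <cstddef>

// Convert interleaved audio (L R L R ...) into per-channel (planar)
// buffers. For sample index i, i % channels selects the channel and
// i / channels selects the frame, exactly as in process_callback.
void deinterleave(const float *interleaved, float *const *planar,
                  std::size_t frames, std::size_t channels) {
    for (std::size_t i = 0; i < frames * channels; ++i)
        planar[i % channels][i / channels] = interleaved[i];
}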
Example 6: l
void LoopPlayer::fetchFrames() {
QMutexLocker l(&qmLock);
AudioOutputPtr ao = g.ao;
if (!ao || qmPackets.isEmpty())
return;
double cmp = qtTicker.elapsed();
QMultiMap<float, Packet>::iterator i = qmPackets.begin();
while (i != qmPackets.end()) {
if (i.key() > cmp)
break;
ao->addFrameToBuffer(this, i.value().second, i.value().first);
i = qmPackets.erase(i);
}
qtLastFetch.restart();
}
Example 7: allocOutputBuffer
void JackAudioSystem::allocOutputBuffer(jack_nframes_t frames) {
iBufferSize = frames;
AudioOutputPtr ao = g.ao;
JackAudioOutput * const jao = dynamic_cast<JackAudioOutput *>(ao.get());
if (jao) {
jao->qmMutex.lock();
}
if (output_buffer) {
delete [] output_buffer;
output_buffer = NULL;
}
output_buffer = new (std::nothrow) jack_default_audio_sample_t[frames * iOutPorts];
if (output_buffer == NULL) {
// plain new would throw std::bad_alloc instead of returning NULL;
// the nothrow form makes this check meaningful
bJackIsGood = false;
}
if (jao) {
jao->qmMutex.unlock();
}
}
Example 8: Trace
// Add a frame to the audio playback queue
void LoopUser::addFrame(const QByteArray &packet) {
if (DOUBLE_RAND < g_struct.s.dPacketLoss) {
Trace("Drop");
return;
}
bool restart = (qtLastFetch.elapsed()*CLOCKS_PER_SEC > 100);
{
MutexLocker l(&qmLock);
double time = qtTicker.elapsed();
double r;
if (restart)
r = 0.0;
else
r = DOUBLE_RAND * g_struct.s.dMaxPacketDelay;
qmPackets.insert(make_pair(static_cast<float>(time + r), packet));
}
// Restart check
if (qtLastFetch.elapsed()*CLOCKS_PER_SEC > 100) {
AudioOutputPtr ao = g_struct.ao;
if (ao) {
MessageHandler::UDPMessageType msgType = MessageHandler::UDPVoiceAACPlus/*MessageHandler::UDPVoiceCELT*/;//static_cast<MessageHandler::UDPMessageType>((packet.at(0) >> 5) & 0x7);
for (int i=0; i<PER_FRAME_OF_SAMEPLE; i++)
{
ao->addFrameToBuffer(this, QByteArray(), 0, msgType);
char buf[512] = {0};
sprintf(buf,"LoopUser::addFrame QByteArray() t=%d\n",qtLastFetch.elapsed()*CLOCKS_PER_SEC );
//OutputDebugStringA(buf);
}
}
}
}
Example 9: l
// Fetch frames from the audio queue
void LoopUser::fetchFrames() {
MutexLocker l(&qmLock);
AudioOutputPtr ao = g_struct.ao;
if (!ao || qmPackets.empty()) {
return;
}
double cmp = qtTicker.elapsed();
std::multimap<float, QByteArray>::iterator i = qmPackets.begin();
while (i != qmPackets.end()) {
// if (i->first > cmp)
// {
// char buf[512] = {0};
// sprintf(buf,"LoopUser::fetchFrames() cmp=%f\n",i->first );
// OutputDebugStringA(buf);
// break;
// }
const QByteArray &data = i->second;
PacketDataStream pds((char*)&data[0], data.size());
unsigned int msgFlags = 0;
unsigned int iSeq = static_cast<unsigned int>(pds.next8());
QByteArray qba;
pds.dataBlock(pds.left(), qba);
ao->addFrameToBuffer(this, qba, iSeq, MessageHandler::UDPVoiceAACPlus/*MessageHandler::UDPVoiceCELT*/);
i = qmPackets.erase(i);
}
qtLastFetch.restart();
}
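fetchFrames() implies a simple framing for the queued packets: one sequence byte, then the encoded payload, read back via pds.next8() and pds.dataBlock(). A sketch of the matching encode side in plain bytes (the framing is inferred from the fetch code; it is not shown in the source):

#include <string>

// Hypothetical encode side of the framing read back in fetchFrames():
// a single sequence byte followed by the raw encoded audio payload.
std::string packLoopPacket(unsigned char seq, const std::string &payload) {
    std::string packet;
    packet.reserve(1 + payload.size());
    packet.push_back(static_cast<char>(seq)); // read back via pds.next8()
    packet += payload;                        // read back via pds.dataBlock()
    return packet;
}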
Example 10: Q_ASSERT
void PulseAudioSystem::write_callback(pa_stream *s, size_t bytes, void *userdata) {
PulseAudioSystem *pas = reinterpret_cast<PulseAudioSystem *>(userdata);
Q_ASSERT(s == pas->pasOutput);
AudioOutputPtr ao = g.ao;
PulseAudioOutput *pao = dynamic_cast<PulseAudioOutput *>(ao.get());
unsigned char buffer[bytes];
if (! pao) {
// Transitioning, but most likely transitions back, so just zero.
memset(buffer, 0, bytes);
pa_stream_write(s, buffer, bytes, NULL, 0, PA_SEEK_RELATIVE);
pas->wakeup();
return;
}
const pa_sample_spec *pss = pa_stream_get_sample_spec(s);
const pa_channel_map *pcm = pa_stream_get_channel_map(pas->pasOutput);
if (!pa_sample_spec_equal(pss, &pao->pss) || !pa_channel_map_equal(pcm, &pao->pcm)) {
pao->pss = *pss;
pao->pcm = *pcm;
if (pss->format == PA_SAMPLE_FLOAT32NE)
pao->eSampleFormat = PulseAudioOutput::SampleFloat;
else
pao->eSampleFormat = PulseAudioOutput::SampleShort;
pao->iMixerFreq = pss->rate;
pao->iChannels = pss->channels;
unsigned int chanmasks[pss->channels];
for (int i=0;i<pss->channels;++i) {
unsigned int cm = 0;
switch (pcm->map[i]) {
case PA_CHANNEL_POSITION_LEFT:
cm = SPEAKER_FRONT_LEFT;
break;
case PA_CHANNEL_POSITION_RIGHT:
cm = SPEAKER_FRONT_RIGHT;
break;
case PA_CHANNEL_POSITION_CENTER:
cm = SPEAKER_FRONT_CENTER;
break;
case PA_CHANNEL_POSITION_REAR_LEFT:
cm = SPEAKER_BACK_LEFT;
break;
case PA_CHANNEL_POSITION_REAR_RIGHT:
cm = SPEAKER_BACK_RIGHT;
break;
case PA_CHANNEL_POSITION_REAR_CENTER:
cm = SPEAKER_BACK_CENTER;
break;
case PA_CHANNEL_POSITION_LFE:
cm = SPEAKER_LOW_FREQUENCY;
break;
case PA_CHANNEL_POSITION_SIDE_LEFT:
cm = SPEAKER_SIDE_LEFT;
break;
case PA_CHANNEL_POSITION_SIDE_RIGHT:
cm = SPEAKER_SIDE_RIGHT;
break;
case PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER:
cm = SPEAKER_FRONT_LEFT_OF_CENTER;
break;
case PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER:
cm = SPEAKER_FRONT_RIGHT_OF_CENTER;
break;
default:
cm = 0;
break;
}
chanmasks[i] = cm;
}
pao->initializeMixer(chanmasks);
}
const unsigned int iSampleSize = pao->iSampleSize;
const unsigned int samples = static_cast<unsigned int>(bytes) / iSampleSize;
bool oldAttenuation = pas->bAttenuating;
// do we have some mixed output?
if (pao->mix(buffer, samples)) {
// attenuate if instructed to or it's in settings
pas->bAttenuating = (g.bAttenuateOthers || g.s.bAttenuateOthers);
} else {
memset(buffer, 0, bytes);
// attenuate if instructed to (self-activated)
pas->bAttenuating = g.bAttenuateOthers;
}
// if the attenuation state has changed
if (oldAttenuation != pas->bAttenuating) {
pas->setVolumes();
}
pa_stream_write(s, buffer, iSampleSize * samples, NULL, 0, PA_SEEK_RELATIVE);
}
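The long switch above maps PulseAudio channel positions onto SPEAKER_* bit masks before handing them to initializeMixer(). A table-driven sketch of the same mapping (the SPEAKER_* constants are assumed to come from a project header, as in the example):

#include <cstddef>
#include <pulse/channelmap.h>

// Table-driven equivalent of the switch in write_callback: map a
// PulseAudio channel position to its SPEAKER_* mask, 0 if unhandled.
struct PositionMask { pa_channel_position_t pos; unsigned int mask; };

static const PositionMask kPositionMasks[] = {
    { PA_CHANNEL_POSITION_LEFT,                  SPEAKER_FRONT_LEFT            },
    { PA_CHANNEL_POSITION_RIGHT,                 SPEAKER_FRONT_RIGHT           },
    { PA_CHANNEL_POSITION_CENTER,                SPEAKER_FRONT_CENTER          },
    { PA_CHANNEL_POSITION_REAR_LEFT,             SPEAKER_BACK_LEFT             },
    { PA_CHANNEL_POSITION_REAR_RIGHT,            SPEAKER_BACK_RIGHT            },
    { PA_CHANNEL_POSITION_REAR_CENTER,           SPEAKER_BACK_CENTER           },
    { PA_CHANNEL_POSITION_LFE,                   SPEAKER_LOW_FREQUENCY         },
    { PA_CHANNEL_POSITION_SIDE_LEFT,             SPEAKER_SIDE_LEFT             },
    { PA_CHANNEL_POSITION_SIDE_RIGHT,            SPEAKER_SIDE_RIGHT            },
    { PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER,  SPEAKER_FRONT_LEFT_OF_CENTER  },
    { PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER, SPEAKER_FRONT_RIGHT_OF_CENTER },
};

static unsigned int speakerMask(pa_channel_position_t pos) {
    for (std::size_t i = 0; i < sizeof(kPositionMasks) / sizeof(kPositionMasks[0]); ++i)
        if (kPositionMasks[i].pos == pos)
            return kPositionMasks[i].mask;
    return 0; // matches the default branch above
}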
Example 11: eventCallback
void PulseAudioSystem::eventCallback(pa_mainloop_api *api, pa_defer_event *) {
api->defer_enable(pade, false);
if (! bSourceDone || ! bSinkDone || ! bServerDone)
return;
AudioInputPtr ai = g.ai;
AudioOutputPtr ao = g.ao;
AudioInput *raw_ai = ai.get();
AudioOutput *raw_ao = ao.get();
PulseAudioInput *pai = dynamic_cast<PulseAudioInput *>(raw_ai);
PulseAudioOutput *pao = dynamic_cast<PulseAudioOutput *>(raw_ao);
if (raw_ao) {
QString odev = outputDevice();
pa_stream_state ost = pasOutput ? pa_stream_get_state(pasOutput) : PA_STREAM_TERMINATED;
bool do_stop = false;
bool do_start = false;
if (! pao && (ost == PA_STREAM_READY)) {
do_stop = true;
} else if (pao) {
switch (ost) {
case PA_STREAM_TERMINATED: {
if (pasOutput)
pa_stream_unref(pasOutput);
pa_sample_spec pss = qhSpecMap.value(odev);
pa_channel_map pcm = qhChanMap.value(odev);
if ((pss.format != PA_SAMPLE_FLOAT32NE) && (pss.format != PA_SAMPLE_S16NE))
pss.format = PA_SAMPLE_FLOAT32NE;
if (pss.rate == 0)
pss.rate = SAMPLE_RATE;
if ((pss.channels == 0) || (! g.s.doPositionalAudio()))
pss.channels = 1;
pasOutput = pa_stream_new(pacContext, mumble_sink_input, &pss, (pss.channels == 1) ? NULL : &pcm);
pa_stream_set_state_callback(pasOutput, stream_callback, this);
pa_stream_set_write_callback(pasOutput, write_callback, this);
}
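// fall through: the freshly created stream still needs to be connected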
case PA_STREAM_UNCONNECTED:
do_start = true;
break;
case PA_STREAM_READY: {
if (g.s.iOutputDelay != iDelayCache) {
do_stop = true;
} else if (g.s.doPositionalAudio() != bPositionalCache) {
do_stop = true;
} else if (odev != qsOutputCache) {
do_stop = true;
}
break;
}
default:
break;
}
}
if (do_stop) {
qWarning("PulseAudio: Stopping output");
pa_stream_disconnect(pasOutput);
iSinkId = -1;
} else if (do_start) {
qWarning("PulseAudio: Starting output: %s", qPrintable(odev));
pa_buffer_attr buff;
const pa_sample_spec *pss = pa_stream_get_sample_spec(pasOutput);
const size_t sampleSize = (pss->format == PA_SAMPLE_FLOAT32NE) ? sizeof(float) : sizeof(short);
const unsigned int iBlockLen = ((pao->iFrameSize * pss->rate) / SAMPLE_RATE) * pss->channels * static_cast<unsigned int>(sampleSize);
buff.tlength = iBlockLen * (g.s.iOutputDelay+1);
buff.minreq = iBlockLen;
buff.maxlength = -1;
buff.prebuf = -1;
buff.fragsize = iBlockLen;
iDelayCache = g.s.iOutputDelay;
bPositionalCache = g.s.doPositionalAudio();
qsOutputCache = odev;
pa_stream_connect_playback(pasOutput, qPrintable(odev), &buff, PA_STREAM_ADJUST_LATENCY, NULL, NULL);
pa_context_get_sink_info_by_name(pacContext, qPrintable(odev), sink_info_callback, this);
}
}
if (raw_ai) {
QString idev = inputDevice();
pa_stream_state ist = pasInput ? pa_stream_get_state(pasInput) : PA_STREAM_TERMINATED;
bool do_stop = false;
bool do_start = false;
if (! pai && (ist == PA_STREAM_READY)) {
do_stop = true;
} else if (pai) {
switch (ist) {
case PA_STREAM_TERMINATED: {
if (pasInput)
pa_stream_unref(pasInput);
pa_sample_spec pss = qhSpecMap.value(idev);
if ((pss.format != PA_SAMPLE_FLOAT32NE) && (pss.format != PA_SAMPLE_S16NE))
pss.format = PA_SAMPLE_FLOAT32NE;
if (pss.rate == 0)
//......... part of the code omitted here .........
Example 12: qMax
//......... part of the code omitted here .........
iHoldFrames = 0;
}
if (g.s.atTransmit == Settings::Continuous)
bIsSpeech = true;
else if (g.s.atTransmit == Settings::PushToTalk)
bIsSpeech = g.s.uiDoublePush && ((g.uiDoublePush < g.s.uiDoublePush) || (g.tDoublePush.elapsed() < g.s.uiDoublePush));
bIsSpeech = bIsSpeech || (g.iPushToTalk > 0);
ClientUser *p = ClientUser::get(g.uiSession);
if (g.s.bMute || ((g.s.lmLoopMode != Settings::Local) && p && (p->bMute || p->bSuppress)) || g.bPushToMute || (g.iTarget < 0)) {
bIsSpeech = false;
}
if (bIsSpeech) {
iSilentFrames = 0;
} else {
iSilentFrames++;
if (iSilentFrames > 500)
iFrameCounter = 0;
}
if (p) {
if (! bIsSpeech)
p->setTalking(Settings::Passive);
else if (g.iTarget == 0)
p->setTalking(Settings::Talking);
else
p->setTalking(Settings::Shouting);
}
if (g.s.bTxAudioCue && g.uiSession != 0) {
AudioOutputPtr ao = g.ao;
if (bIsSpeech && ! bPreviousVoice && ao)
ao->playSample(g.s.qsTxAudioCueOn);
else if (ao && !bIsSpeech && bPreviousVoice)
ao->playSample(g.s.qsTxAudioCueOff);
}
if (! bIsSpeech && ! bPreviousVoice) {
iBitrate = 0;
if (g.s.iaeIdleAction != Settings::Nothing && ((tIdle.elapsed() / 1000000ULL) > g.s.iIdleTime)) {
if (g.s.iaeIdleAction == Settings::Deafen && !g.s.bDeaf) {
tIdle.restart();
emit doDeaf();
} else if (g.s.iaeIdleAction == Settings::Mute && !g.s.bMute) {
tIdle.restart();
emit doMute();
}
}
spx_int32_t increment = 0;
speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
return;
} else {
spx_int32_t increment = 12;
speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
}
if (bIsSpeech && !bPreviousVoice) {
bResetEncoder = true;
}
Example 13: abs
//......... part of the code omitted here .........
}
sum=1.0f;
for (i=0;i<iFrameSize;i++)
sum += static_cast<float>(psSource[i] * psSource[i]);
float micLevel = sqrtf(sum / static_cast<float>(iFrameSize));
dPeakSignal=20.0f*log10f(micLevel / 32768.0f);
if (dPeakSignal < -96.0f)
dPeakSignal = -96.0f;
spx_int32_t prob = 0;
speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_PROB, &prob);
fSpeechProb = static_cast<float>(prob) / 100.0f;
float level = (g.s.vsVAD == Settings::SignalToNoise) ? fSpeechProb : (1.0f + dPeakMic / 96.0f);
if (level > g.s.fVADmax)
iIsSpeech = 1;
else if (level > g.s.fVADmin && bPreviousVoice)
iIsSpeech = 1;
else
iIsSpeech = 0;
if (! iIsSpeech) {
iHoldFrames++;
if (iHoldFrames < g.s.iVoiceHold)
iIsSpeech=1;
} else {
iHoldFrames = 0;
}
if (g.s.atTransmit == Settings::Continous)
iIsSpeech = 1;
else if (g.s.atTransmit == Settings::PushToTalk)
iIsSpeech = g.s.uiDoublePush && ((g.uiDoublePush < g.s.uiDoublePush) || (g.tDoublePush.elapsed() < g.s.uiDoublePush));
iIsSpeech = iIsSpeech || (g.iPushToTalk > 0) || (g.iAltSpeak > 0);
if (g.s.bMute || ((g.s.lmLoopMode != Settings::Local) && p && p->bMute) || g.bPushToMute) {
iIsSpeech = 0;
}
if (iIsSpeech) {
iSilentFrames = 0;
} else {
iSilentFrames++;
if (iSilentFrames > 200)
iFrameCounter = 0;
}
if (p)
p->setTalking(iIsSpeech, (g.iAltSpeak > 0));
if (g.s.bPushClick && (g.s.atTransmit == Settings::PushToTalk)) {
AudioOutputPtr ao = g.ao;
if (iIsSpeech && ! bPreviousVoice && ao)
ao->playSine(400.0f,1200.0f,5);
else if (ao && !iIsSpeech && bPreviousVoice)
ao->playSine(620.0f,-1200.0f,5);
}
if (! iIsSpeech && ! bPreviousVoice) {
iBitrate = 0;
if (g.s.iIdleTime && ! g.s.bMute && ((tIdle.elapsed() / 1000000ULL) > g.s.iIdleTime)) {
emit doMute();
tIdle.restart();
}
return;
}
bPreviousVoice = iIsSpeech;
tIdle.restart();
if (! iIsSpeech) {
memset(psMic, 0, sizeof(short) * iFrameSize);
}
if (g.s.bTransmitPosition && g.p && ! g.bCenterPosition && (iFrames == 0) && g.p->fetch()) {
QByteArray q;
QDataStream ds(&q, QIODevice::WriteOnly);
ds << g.p->fPosition[0];
ds << g.p->fPosition[1];
ds << g.p->fPosition[2];
speex_bits_pack(&sbBits, 13, 5);
speex_bits_pack(&sbBits, q.size(), 4);
const unsigned char *d=reinterpret_cast<const unsigned char*>(q.data());
for (i=0;i<q.size();i++) {
speex_bits_pack(&sbBits, d[i], 8);
}
}
speex_encode_int(esEncState, psSource, &sbBits);
iFrames++;
speex_encoder_ctl(esEncState, SPEEX_GET_BITRATE, &iBitrate);
flushCheck();
}
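Examples 13 and 14 both reduce a 16-bit frame to a peak level in dBFS: RMS over the frame, converted with 20*log10, clamped to a -96 dB floor. A standalone sketch of that computation:

#include <cmath>

// RMS level of a 16-bit frame in dBFS with a -96 dB floor, as computed
// in Examples 13 and 14. sum starts at 1.0f so a silent frame does not
// feed 0 into log10f.
float peakDb(const short *samples, int frames) {
    float sum = 1.0f;
    for (int i = 0; i < frames; ++i)
        sum += static_cast<float>(samples[i]) * static_cast<float>(samples[i]);
    float rms = sqrtf(sum / static_cast<float>(frames));
    float db = 20.0f * log10f(rms / 32768.0f);
    return (db < -96.0f) ? -96.0f : db;
}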
Example 14: qMax
//......... part of the code omitted here .........
float micLevel = sqrtf(sum / static_cast<float>(iFrameSize));
dPeakSignal = qMax(20.0f*log10f(micLevel / 32768.0f), -96.0f);
spx_int32_t prob = 0;
speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_PROB, &prob);
fSpeechProb = static_cast<float>(prob) / 100.0f;
// clean microphone level: peak of filtered signal attenuated by AGC gain
dPeakCleanMic = qMax(dPeakSignal - gainValue, -96.0f);
float level = (g.s.vsVAD == Settings::SignalToNoise) ? fSpeechProb : (1.0f + dPeakCleanMic / 96.0f);
bool bIsSpeech = false;
if (level > g.s.fVADmax)
bIsSpeech = true;
else if (level > g.s.fVADmin && bPreviousVoice)
bIsSpeech = true;
if (! bIsSpeech) {
iHoldFrames++;
if (iHoldFrames < g.s.iVoiceHold)
bIsSpeech = true;
} else {
iHoldFrames = 0;
}
if (g.s.atTransmit == Settings::Continous)
bIsSpeech = true;
else if (g.s.atTransmit == Settings::PushToTalk)
bIsSpeech = g.s.uiDoublePush && ((g.uiDoublePush < g.s.uiDoublePush) || (g.tDoublePush.elapsed() < g.s.uiDoublePush));
bIsSpeech = bIsSpeech || (g.iPushToTalk > 0);
if (g.s.bMute || ((g.s.lmLoopMode != Settings::Local) && p && (p->bMute || p->bSuppress)) || g.bPushToMute || (g.iTarget < 0)) {
bIsSpeech = false;
}
if (bIsSpeech) {
iSilentFrames = 0;
} else {
iSilentFrames++;
if (iSilentFrames > 500)
iFrameCounter = 0;
}
if (p) {
if (! bIsSpeech)
p->setTalking(Settings::Passive);
else if (g.iTarget == 0)
p->setTalking(Settings::Talking);
else
p->setTalking(Settings::Shouting);
}
if (g.s.bTxAudioCue && g.uiSession != 0) {
AudioOutputPtr ao = g.ao;
if (bIsSpeech && ! bPreviousVoice && ao)
ao->playSample(g.s.qsTxAudioCueOn);
else if (ao && !bIsSpeech && bPreviousVoice)
ao->playSample(g.s.qsTxAudioCueOff);
}
if (! bIsSpeech && ! bPreviousVoice) {
iBitrate = 0;
if (g.s.iIdleTime && ! g.s.bDeaf && ((tIdle.elapsed() / 1000000ULL) > g.s.iIdleTime)) {
emit doDeaf();
tIdle.restart();
}
spx_int32_t increment = 0;
speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
return;
} else {
spx_int32_t increment = 12;
speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
}
tIdle.restart();
/*
int r = celt_encoder_ctl(ceEncoder, CELT_SET_POST_MDCT_CALLBACK(celtBack, NULL));
qWarning() << "Set Callback" << r;
*/
unsigned char buffer[512];
int len;
if (umtType != MessageHandler::UDPVoiceSpeex) {
len = encodeCELTFrame(psSource, buffer);
if (len == 0)
return;
} else {
len = encodeSpeexFrame(psSource, buffer);
}
flushCheck(QByteArray(reinterpret_cast<const char *>(buffer), len), ! bIsSpeech);
if (! bIsSpeech)
iBitrate = 0;
bPreviousVoice = bIsSpeech;
}
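The voice-activity decision shared by Examples 12 through 14 is a double-threshold scheme with hold frames: speech starts only when the level exceeds fVADmax, continues while it stays above fVADmin, and lingers for iVoiceHold frames after the level drops. A condensed sketch of that hysteresis, with the member variables folded into a small state struct:

// Double-threshold VAD with hold frames, condensed from Examples 12-14.
struct VadState {
    bool previousVoice; // was the previous frame classified as speech?
    int holdFrames;     // frames since the level last qualified as speech
};

// level is either the speech probability or 1.0f + peakDb / 96.0f,
// depending on the configured VAD mode; vadMax starts speech, vadMin
// keeps it going, voiceHold delays the cutoff.
bool isSpeech(VadState &st, float level, float vadMax, float vadMin,
              int voiceHold) {
    bool speech = (level > vadMax) || (level > vadMin && st.previousVoice);
    if (!speech) {
        ++st.holdFrames;
        if (st.holdFrames < voiceHold)
            speech = true; // hold: keep transmitting briefly after speech
    } else {
        st.holdFrames = 0;
    }
    st.previousVoice = speech;
    return speech;
}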