本文整理汇总了C++中CSpEvent类的典型用法代码示例。如果您正苦于以下问题:C++ CSpEvent类的具体用法?C++ CSpEvent怎么用?C++ CSpEvent使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了CSpEvent类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: recordEvent
// Drains pending SAPI events from the recognition context and, for each
// hypothesis or final recognition, speaks the text back asynchronously and
// dispatches it as a command.
void speech_recognize::recordEvent()
{
    USES_CONVERSION;
    CSpEvent event;

    if (m_SREngine.m_pRecoCtxt)
    {
        // Pull events until the queue is empty (GetFrom returns S_FALSE).
        while( S_OK==event.GetFrom(m_SREngine.m_pRecoCtxt) )
        {
            switch(event.eEventId)
            {
            case SPEI_FALSE_RECOGNITION:  // false recognition: ignored
                break;
            case SPEI_HYPOTHESIS:         // partial hypothesis
            case SPEI_RECOGNITION:        // final recognition
                {
                    CSpDynamicString dstrText;
                    if (SUCCEEDED(event.RecoResult()->GetText(SP_GETWHOLEPHRASE, SP_GETWHOLEPHRASE,
                        TRUE, &dstrText, NULL)))
                    {
                        // Echo the recognized phrase via TTS, then hand it to
                        // the command dispatcher as a narrow string.
                        m_SREngine.m_pVoice->Speak(dstrText, SPF_ASYNC, NULL);
                        executeCommand(event.RecoResult(), W2A(dstrText));
                    }
                }
                break;
            default :
                break;
            }
        }
    }
}
示例2: GetText
// Drains all pending recognition events from the context and returns the
// text of the last SPEI_RECOGNITION seen (empty string if none occurred).
//
// ulStart/nlCount select a sub-range of phrase elements; pass
// nlCount == (ULONG)-1 to fetch the whole phrase with inverse-text-
// normalization replacements applied.
CString CTTS::GetText(ULONG ulStart, ULONG nlCount)
{
    USES_CONVERSION;
    CSpEvent event;
    CSpDynamicString dstrText;

    // Process all of the recognition events; a later event overwrites the
    // text of an earlier one (last result wins).
    while (event.GetFrom(m_cpRecoCtxt) == S_OK)
    {
        if (event.eEventId != SPEI_RECOGNITION)
            continue;

        // nlCount is unsigned, so compare against the explicit sentinel
        // rather than the signed literal -1.
        if (nlCount == (ULONG)-1)
        {
            event.RecoResult()->GetText(SP_GETWHOLEPHRASE,
                SP_GETWHOLEPHRASE, TRUE, &dstrText, NULL);
        }
        else
        {
            ASSERT(nlCount > 0);
            event.RecoResult()->GetText(ulStart, nlCount, FALSE,
                &dstrText, NULL);
        }
    }

    // CSpDynamicString converts to WCHAR*; a NULL (no recognition) yields
    // an empty CString, matching the original cast behavior.
    CString str=(CString)dstrText;
    return str;
}
示例3: os_set_thread_name
void mssapi_captions::main_thread()
try {
HRESULT hr;
os_set_thread_name(__FUNCTION__);
hr = grammar->SetDictationState(SPRS_ACTIVE);
if (FAILED(hr))
throw HRError("SetDictationState failed", hr);
hr = recognizer->SetRecoState(SPRST_ACTIVE);
if (FAILED(hr))
throw HRError("SetRecoState(SPRST_ACTIVE) failed", hr);
HANDLE events[] = {notify, stop};
started = true;
for (;;) {
DWORD ret = WaitForMultipleObjects(2, events, false, INFINITE);
if (ret != WAIT_OBJECT_0)
break;
CSpEvent event;
bool exit = false;
while (event.GetFrom(context) == S_OK) {
if (event.eEventId == SPEI_RECOGNITION) {
ISpRecoResult *result = event.RecoResult();
CoTaskMemPtr<wchar_t> text;
hr = result->GetText((ULONG)-1, (ULONG)-1,
true, &text, nullptr);
if (FAILED(hr))
continue;
char text_utf8[512];
os_wcs_to_utf8(text, 0, text_utf8, 512);
callback(text_utf8);
blog(LOG_DEBUG, "\"%s\"", text_utf8);
} else if (event.eEventId == SPEI_END_SR_STREAM) {
exit = true;
break;
}
}
if (exit)
break;
}
audio->Stop();
} catch (HRError err) {
blog(LOG_WARNING, "%s failed: %s (%lX)", __FUNCTION__, err.str, err.hr);
}
示例4: recognitionCallback
// Notification callback: drains the recognizer's event queue and forwards
// each final recognition result to the handler. wParam/lParam are unused.
static void __stdcall recognitionCallback(WPARAM wParam, LPARAM lParam)
{
    CSpEvent evt;
    for (;;) {
        if (evt.GetFrom(state.recog) != S_OK)
            break;
        if (evt.eEventId != SPEI_RECOGNITION)
            continue;
        handleRecognition(evt.RecoResult());
    }
}
示例5: RecoEvent
/*****************************************************************************************
* CSimpleDict::RecoEvent()
* Invoked whenever the dialog procedure is notified of a recognition event.
* Drains the event queue and inserts recognized text into the edit box.
******************************************************************************************/
void CSimpleDict::RecoEvent()
{
    USES_CONVERSION;
    CSpEvent event;

    // Pull every queued event from the recognition context.
    while (event.GetFrom(m_cpRecoCtxt) == S_OK)
    {
        if (event.eEventId == SPEI_SOUND_START)
        {
            m_bInSound = TRUE;
        }
        else if (event.eEventId == SPEI_SOUND_END)
        {
            if (m_bInSound)
            {
                m_bInSound = FALSE;
                if (!m_bGotReco)
                {
                    // Sound started and ended without the engine
                    // recognizing anything: insert a noise placeholder.
                    const TCHAR szNoise[] = _T("<noise>");
                    ::SendDlgItemMessage( m_hDlg, IDC_EDIT_DICT,
                        EM_REPLACESEL, TRUE, (LPARAM) szNoise );
                }
                m_bGotReco = FALSE;
            }
        }
        else if (event.eEventId == SPEI_RECOGNITION)
        {
            // There may be multiple recognition results; handle each as it
            // comes off the queue.
            m_bGotReco = TRUE;
            static const WCHAR wszUnrecognized[] = L"<Unrecognized>";

            CSpDynamicString dstrText;
            if (FAILED(event.RecoResult()->GetText(SP_GETWHOLEPHRASE, SP_GETWHOLEPHRASE, TRUE,
                &dstrText, NULL)))
            {
                dstrText = wszUnrecognized;
            }

            // Append a space so consecutive recognitions stay separated.
            dstrText.Append(L" ");

            ::SendDlgItemMessage( m_hDlg, IDC_EDIT_DICT, EM_REPLACESEL, TRUE, (LPARAM) W2T(dstrText) );
        }
    }
}
示例6: OnRecoEvent
// Handles SAPI recognition notifications for the dialog: drains the event
// queue and maps recognized words onto the active view's movement key.
void CMyDlg::OnRecoEvent()
{
//CMyPackManApp *pApp = (CMyPackManApp *)AfxGetApp(); //View -> App
CMainFrame *pMain = (CMainFrame *)AfxGetMainWnd(); //View -> MainFrm
// NOTE(review): pView is fetched but never used below; the handler writes
// through the m_pView member instead — confirm which is intended.
CMyPackManView *pView = (CMyPackManView *)pMain->GetActiveView();
USES_CONVERSION;
CSpEvent event;
//MessageBox(L"A");
// Process every event currently queued on the recognition context.
while (event.GetFrom(m_cpRecoCtxt) == S_OK)
{
switch (event.eEventId)
{
case SPEI_RECOGNITION:
{
m_bReco = TRUE;
// Fallback text used when the engine cannot produce a transcript.
static const WCHAR wszUnrecognized[] = L"fail";
CSpDynamicString dstrText;
if (FAILED(event.RecoResult()->GetText(SP_GETWHOLEPHRASE, SP_GETWHOLEPHRASE, TRUE,
&dstrText, NULL)))
{
dstrText = wszUnrecognized;
}
// A trailing space is appended, so every comparison below includes it.
dstrText.Append(L" ");
m_Edit = dstrText;
//if (m_Edit == "up " || m_Edit == "Up " || m_Edit == "Down " || m_Edit == "down " || m_Edit == "left " || m_Edit == "Left " || m_Edit == "Right " || m_Edit == "right "
//	|| m_Edit == "let " || m_Edit == "light " || m_Edit == "night ")
//	pView->m_EditV = m_Edit;
////::SendDlgItemMessage(m_hWnd, IDC_EDIT_DICT, EM_REPLACESEL, TRUE, (LPARAM)W2T(dstrText));
//UpdateData(TRUE);
//m_pView->Invalidate();
// Map recognized words to movement keys. "one"/"two" appear to stand in
// for left/right (see the commented-out "let"/"light"/"night" variants,
// presumably frequent mishearings) — TODO confirm with the author.
if (m_Edit == "down " || m_Edit == "Down ")
m_pView->mKey = DOWN;
else if (m_Edit == "up " || m_Edit == "Up ")
m_pView->mKey = UP;
else if (m_Edit == "one " || m_Edit == "One ")
m_pView->mKey = LEFT;
else if (m_Edit == "two " || m_Edit == "Two ")
m_pView->mKey = RITE;
UpdateData(TRUE);
//UpdateData(FALSE);
}
break;
}
}
}
示例7: VOICEREC_process_event
// Drains the global recognition context's event queue; only final
// recognition events are acted upon (dispatched as voice commands).
void VOICEREC_process_event(HWND hWnd)
{
    CSpEvent evt; // Event helper class (releases its payload on destruction)

    for (;;)
    {
        if (evt.GetFrom(p_recogContext) != S_OK)
            break;

        if (evt.eEventId == SPEI_RECOGNITION)
            VOICEREC_execute_command(evt.RecoResult(), hWnd);
    }
}
示例8: BlockForResult
// Blocks until the recognition context delivers an event, then hands the
// event's result (if any) to the caller with an extra reference.
inline HRESULT BlockForResult(ISpRecoContext * pRecoCtxt, ISpRecoResult ** ppResult) //recording variable and the result
{
HRESULT hr = S_OK;
CSpEvent event;
// GetFrom returns S_FALSE when the queue is empty; in that case wait
// (indefinitely) for the context to signal a notification and poll again.
// The loop exits when GetFrom yields S_OK (an event was fetched) or when
// any call fails.
while (SUCCEEDED(hr) &&
SUCCEEDED(hr = event.GetFrom(pRecoCtxt)) &&
hr == S_FALSE)
{
hr = pRecoCtxt->WaitForNotifyEvent(INFINITE);
}
// RecoResult() may be NULL if the event carried no result. AddRef before
// returning because CSpEvent releases its payload in its destructor.
*ppResult = event.RecoResult();
if (*ppResult)
{
(*ppResult)->AddRef();
}
return hr;
}
示例9: ProcessRecoEvent
/******************************************************************************
* ProcessRecoEvent *
*------------------*
* Description:
* Runs when a reco-event message reaches the main window procedure.
* Final recognitions are dispatched to ExecuteCommand; false
* recognitions are routed to HandleFalseReco.
*
******************************************************************************/
void ProcessRecoEvent( HWND hWnd )
{
    CSpEvent evt; // Event helper class

    // Keep draining the global context until its event queue is empty.
    while (evt.GetFrom(g_cpRecoCtxt) == S_OK)
    {
        if (evt.eEventId == SPEI_RECOGNITION)
            ExecuteCommand(evt.RecoResult(), hWnd);
        else if (evt.eEventId == SPEI_FALSE_RECOGNITION)
            HandleFalseReco(evt.RecoResult(), hWnd);
    }
}
示例10: MainHandleSynthEvent
void CTTSApp::MainHandleSynthEvent()
/////////////////////////////////////////////////////////////////
//
// Handles the WM_TTSAPPCUSTOMEVENT application defined message and all
// of it's appropriate SAPI5 events.
//
{
CSpEvent event; // helper class in sphelper.h for events that releases any
// allocated memory in it's destructor - SAFER than SPEVENT
SPVOICESTATUS Stat;
WPARAM nStart;
LPARAM nEnd;
int i = 0;
HRESULT hr = S_OK;
while( event.GetFrom(m_cpVoice) == S_OK )
{
switch( event.eEventId )
{
case SPEI_START_INPUT_STREAM:
if( IsDlgButtonChecked( m_hWnd, IDC_EVENTS ) )
{
TTSAppStatusMessage( m_hWnd, _T("StartStream event\r\n") );
}
break;
case SPEI_END_INPUT_STREAM:
// Set global boolean stop to TRUE when finished speaking
m_bStop = TRUE;
// Highlight entire text
nStart = 0;
nEnd = SendDlgItemMessage( m_hWnd, IDE_EDITBOX, WM_GETTEXTLENGTH, 0, 0 );
SendDlgItemMessage( m_hWnd, IDE_EDITBOX, EM_SETSEL, nStart, nEnd );
// Mouth closed
g_iBmp = 0;
InvalidateRect( m_hChildWnd, NULL, FALSE );
if( IsDlgButtonChecked( m_hWnd, IDC_EVENTS ) )
{
TTSAppStatusMessage( m_hWnd, _T("EndStream event\r\n") );
}
break;
case SPEI_VOICE_CHANGE:
if( IsDlgButtonChecked( m_hWnd, IDC_EVENTS ) )
{
TTSAppStatusMessage( m_hWnd, _T("Voicechange event\r\n") );
}
break;
case SPEI_TTS_BOOKMARK:
if( IsDlgButtonChecked( m_hWnd, IDC_EVENTS ) )
{
// Get the string associated with the bookmark
// and add the null terminator.
TCHAR szBuff2[MAX_PATH] = _T("Bookmark event: ");
size_t cEventString = wcslen( event.String() ) + 1;
WCHAR *pwszEventString = new WCHAR[ cEventString ];
if ( pwszEventString )
{
wcscpy_s( pwszEventString, cEventString, event.String() );
_tcscat_s( szBuff2, _countof(szBuff2), CW2T(pwszEventString) );
delete[] pwszEventString;
}
_tcscat_s( szBuff2, _countof(szBuff2), _T("\r\n") );
TTSAppStatusMessage( m_hWnd, szBuff2 );
}
break;
case SPEI_WORD_BOUNDARY:
hr = m_cpVoice->GetStatus( &Stat, NULL );
if( FAILED( hr ) )
{
TTSAppStatusMessage( m_hWnd, _T("Voice GetStatus error\r\n") );
}
// Highlight word
nStart = (LPARAM)( Stat.ulInputWordPos / sizeof(char) );
nEnd = nStart + Stat.ulInputWordLen;
SendDlgItemMessage( m_hWnd, IDE_EDITBOX, EM_SETSEL, nStart, nEnd );
if( IsDlgButtonChecked( m_hWnd, IDC_EVENTS ) )
{
TTSAppStatusMessage( m_hWnd, _T("Wordboundary event\r\n") );
}
break;
case SPEI_PHONEME:
if( IsDlgButtonChecked( m_hWnd, IDC_EVENTS ) )
{
TTSAppStatusMessage( m_hWnd, _T("Phoneme event\r\n") );
}
break;
case SPEI_VISEME:
// Get the current mouth viseme position and map it to one of the
// 7 mouth bitmaps.
g_iBmp = g_aMapVisemeToImage[event.Viseme()]; // current viseme
//.........这里部分代码省略.........
示例11: callback
/**
This is called when SAPI 5.1 has an event.
In the textless case, we only handle SPIE_RECOGNITION event. We aren't looking
at SPIE_HYPOTHESIS. This might be an error. We might be more robust by handling
both.
We process the event and add the phonemes we get to the result list
**/
void sapi_textless_lipsync::callback()
{
CSpEvent event; // the event
ISpRecoResult *pRecoResult; // recoResult from the event
SPPHRASE *pSpPhrase; // phrase from recoResult
SPRECORESULTTIMES pRecoResultTimes; // result times from RecoResult
WCHAR phone_buffer[256]; // phoneme buffer for conversion
long msStart; // time stamp of the result
while (event.GetFrom(this->m_recogCntxt) == S_OK)
{
if (event.eEventId == SPEI_RECOGNITION /*|| event.eEventId == SPEI_HYPOTHESIS */)
{
// for textless we only accept full recognition. This might be an area
// to watch out for
// pull out the result object
pRecoResult = event.RecoResult();
// pull the whole text from the result
CSpDynamicString pSapiText;
pRecoResult->GetText(SP_GETWHOLEPHRASE, SP_GETWHOLEPHRASE, FALSE, &pSapiText, NULL);
// get the start time for the phrase. we use this as an offset for the phrase
// elements. Not sure if this is correct.
pRecoResult->GetResultTimes(&pRecoResultTimes);
msStart = sapi_time_to_milli(pRecoResultTimes.ullStart);
// extract the phrase object
pRecoResult->GetPhrase(&pSpPhrase);
if (pSpPhrase != NULL)
{
// Process each element of the phrase. These should be our
// orthorgraphs
const SPPHRASEELEMENT *p = pSpPhrase->pElements;
const SPPHRASEELEMENT *pEnd = p + pSpPhrase->Rule.ulCountOfElements;
while (p != pEnd)
{
// for each phrase element we create a marker
// that contains the time stamps along with the
// phonemes. associated with it.
alignment_result al;
al.m_orthography = p->pszDisplayText;
// Get the phonemes
ULONG j = 0;
SPPHONEID phn[2];
phn[1] = 0x00;
while (p->pszPronunciation[j] != 0)
{
// process each phoneme
phn[0] = p->pszPronunciation[j];
m_phnCvt->IdToPhone(phn, phone_buffer);
al.m_phonemes.push_back(phone_buffer);
j++;
}
// start time of the ortheme
al.m_msStart= msStart + bytes_to_milli(p->ulAudioStreamOffset);
// end time of the ortheme
al.m_msEnd = bytes_to_milli(p->ulAudioSizeBytes);
al.m_msEnd += al.m_msStart;
// add it to the results
m_results.push_back(al);
p++;
}
}
}
else if (event.eEventId == SPEI_END_SR_STREAM)
{
// This event occurs when the stream has finished processing.
// we set a flag to indicate that things are done.
m_bDone = TRUE;
}
}
}
示例12: W2A
// Handles a rule-based recognition event: stores the recognized text, then
// replays the matched audio through the dictation engine to reject
// over-eager rule matches, extracts XML-assigned phrase properties, and
// finally notifies the callback window.
//
// NOTE(review): most early returns skip FlagCleanup(); only the filter
// rejection and the success path call it — confirm this is intentional.
void RSpeechRecognition::CallbackRule()
{
	USES_CONVERSION;
	HRESULT hr;

	CSpEvent ruleEvent;
	hr = ruleEvent.GetFrom( this->RuleRecoCtxt );
	if ( FAILED(hr) ) return ;

	// The result recognized by the rule-based engine.
	ISpRecoResult* result;
	result = ruleEvent.RecoResult();

	// Fetch the recognized text.
	CSpDynamicString dstrText;
	hr = result->GetText(SP_GETWHOLEPHRASE, SP_GETWHOLEPHRASE, TRUE, &dstrText, NULL);
	if ( FAILED(hr) ) return ;
	this->ResultString = W2A(dstrText);

	// Run the audio portion of the rule-based match through dictation once
	// more. This filters out excessive (false-positive) matches.
	{
		CComPtr<ISpStreamFormat> resultStream;
		hr = result->GetAudio( 0, 0, &resultStream );
		if ( FAILED(hr) ) return;

		// Read input from the retained audio stream.
		hr = this->DictationEngine->SetInput( resultStream, TRUE);
		if(FAILED(hr)) return;

		hr = this->DictationGrammar->SetDictationState(SPRS_ACTIVE );
		if(FAILED(hr)) return;

		hr = this->DictationRecoCtxt->WaitForNotifyEvent(10000); // 10-second timeout
		if ( FAILED(hr) ) return;

		hr = this->DictationGrammar->SetDictationState(SPRS_INACTIVE );
		if(FAILED(hr)) return;

		CSpEvent tempevent;
		hr = tempevent.GetFrom( this->DictationRecoCtxt );
		if ( FAILED(hr) ) return ;

		// The result recognized by the dictation engine.
		ISpRecoResult* tempresult;
		tempresult = tempevent.RecoResult();

		// Fetch the dictated text.
		CSpDynamicString tempdstrText;
		hr = tempresult->GetText(SP_GETWHOLEPHRASE, SP_GETWHOLEPHRASE, TRUE, &tempdstrText, NULL);
		if ( FAILED(hr) ) return ;
		std::string dictationString = W2A(tempdstrText);

		// Narrow down with the dictation filter word.
		if ( dictationString.find(this->DicticationFilterWord) == std::string::npos )
		{
			// Rejected by the filter.
			this->FlagCleanup();
			return ;
		}
	}

	// When XML was used for recognition, retrieve the assigned properties.
	SPPHRASE *pPhrase;
	hr = result->GetPhrase(&pPhrase);
	if ( FAILED(hr) ) return ;

	this->ResultMap.clear();
	const SPPHRASEPROPERTY *pProp;
	for (pProp = pPhrase->pProperties; pProp; pProp = pProp->pNextSibling)
	{
		this->ResultMap[ W2A(pProp->pszName) ] = W2A(pProp->pszValue);
	}
	CoTaskMemFree(pPhrase);

	// Command recognized: notify the callback window.
	SendMessage(this->CallbackWindowHandle , this->CallbackWindowMesage , 0 , 0);

	this->FlagCleanup();
}
示例13: decltype
//.........这里部分代码省略.........
hr = context->SetNotifyWin32Event();
if (FAILED(hr))
throw HRError("SetNotifyWin32Event", hr);
notify = context->GetNotifyEventHandle();
if (notify == INVALID_HANDLE_VALUE)
throw HRError("GetNotifyEventHandle failed", E_NOINTERFACE);
size_t sample_rate = audio_output_get_sample_rate(obs_get_audio());
audio = new CaptionStream((DWORD)sample_rate);
audio->Release();
hr = recognizer->SetInput(audio, false);
if (FAILED(hr))
throw HRError("SetInput failed", hr);
hr = context->CreateGrammar(1, &grammar);
if (FAILED(hr))
throw HRError("CreateGrammar failed", hr);
hr = grammar->LoadDictation(nullptr, SPLO_STATIC);
if (FAILED(hr))
throw HRError("LoadDictation failed", hr);
hr = grammar->SetDictationState(SPRS_ACTIVE);
if (FAILED(hr))
throw HRError("SetDictationState failed", hr);
hr = recognizer->SetRecoState(SPRST_ACTIVE);
if (FAILED(hr))
throw HRError("SetRecoState(SPRST_ACTIVE) failed", hr);
HANDLE events[] = {notify, stop_event};
{
captions->source = GetWeakSourceByName(
captions->source_name.c_str());
OBSSource strong = OBSGetStrongRef(source);
if (strong)
obs_source_add_audio_capture_callback(strong,
pre_cb, &cb);
}
for (;;) {
DWORD ret = WaitForMultipleObjects(2, events, false, INFINITE);
if (ret != WAIT_OBJECT_0)
break;
CSpEvent event;
bool exit = false;
while (event.GetFrom(context) == S_OK) {
if (event.eEventId == SPEI_RECOGNITION) {
ISpRecoResult *result = event.RecoResult();
CoTaskMemPtr<wchar_t> text;
hr = result->GetText((ULONG)-1, (ULONG)-1,
true, &text, nullptr);
if (FAILED(hr))
continue;
char text_utf8[512];
os_wcs_to_utf8(text, 0, text_utf8, 512);
obs_output_t *output =
obs_frontend_get_streaming_output();
if (output)
obs_output_output_caption_text1(output,
text_utf8);
debug("\"%s\"", text_utf8);
obs_output_release(output);
} else if (event.eEventId == SPEI_END_SR_STREAM) {
exit = true;
break;
}
}
if (exit)
break;
}
{
OBSSource strong = OBSGetStrongRef(source);
if (strong)
obs_source_remove_audio_capture_callback(strong,
pre_cb, &cb);
}
audio->Stop();
CoUninitialize();
} catch (HRError err) {
error("%s failed: %s (%lX)", __FUNCTION__, err.str, err.hr);
CoUninitialize();
captions->th.detach();
}
示例14: SpFindBestToken
//.........这里部分代码省略.........
// Set all top-level rules in the new grammar to the active state.
if (SUCCEEDED(hr))
{
hr = cpGrammar->SetRuleState(NULL, NULL, SPRS_ACTIVE);
}
// Set the recognizer state to active to begin recognition.
if (SUCCEEDED(hr))
{
hr = cpRecognizer->SetRecoState(SPRST_ACTIVE_ALWAYS);
}
// Establish a separate Win32 event to signal the event loop exit.
HANDLE hExitEvent = CreateEventW(NULL, FALSE, FALSE, NULL);
// Collect the events listened for to pump the speech event loop.
HANDLE rghEvents[] = { hSpeechNotifyEvent, hExitEvent };
// Speech recognition event loop.
BOOL fContinue = TRUE;
while (fContinue && SUCCEEDED(hr))
{
// Wait for either a speech event or an exit event, with a 15 second timeout.
DWORD dwMessage = WaitForMultipleObjects(sp_countof(rghEvents), rghEvents, FALSE, 15000);
switch (dwMessage)
{
// With the WaitForMultipleObjects call above, WAIT_OBJECT_0 is a speech event from hSpeechNotifyEvent.
case WAIT_OBJECT_0:
{
// Sequentially grab the available speech events from the speech event queue.
CSpEvent spevent;
while (S_OK == spevent.GetFrom(cpContext))
{
switch (spevent.eEventId)
{
case SPEI_RECOGNITION:
{
// Retrieve the recognition result and output the text of that result.
ISpRecoResult* pResult = spevent.RecoResult();
LPWSTR pszCoMemResultText = NULL;
hr = pResult->GetText(SP_GETWHOLEPHRASE, SP_GETWHOLEPHRASE, TRUE, &pszCoMemResultText, NULL);
if (SUCCEEDED(hr))
{
wprintf(L"Recognition event received, text=\"%s\"\r\n", pszCoMemResultText);
}
// Also retrieve the retained audio we requested.
CComPtr<ISpStreamFormat> cpRetainedAudio;
if (SUCCEEDED(hr))
{
hr = pResult->GetAudio(0, 0, &cpRetainedAudio);
}
// To demonstrate, we'll speak the retained audio back using ISpVoice.
CComPtr<ISpVoice> cpVoice;
if (SUCCEEDED(hr))
{
hr = cpVoice.CoCreateInstance(CLSID_SpVoice);
示例15: _A2W
// Re-runs the audio portion of a rule-based recognition through the
// dictation engine. This rejects excessive (false-positive) rule matches.
// Returns the dictated text, "" when rejected or nothing was recognized,
// or a Windows error on failure.
xreturn::r<std::string> Recognition_SAPI::convertDictation(ISpRecoResult* result,const std::string& ruleName)
{
	HRESULT hr;
	_USE_WINDOWS_ENCODING;

	CComPtr<ISpStreamFormat> resultStream;
	{
		hr = result->GetAudio( 0, 1, &resultStream );
		if(FAILED(hr)) return xreturn::windowsError(hr);

		// Feed the retained audio into the dictation engine.
		hr = this->DictationEngine->SetInput( resultStream, TRUE);
		if(FAILED(hr)) return xreturn::windowsError(hr);

		hr = this->DictationGrammar->SetRuleState(ruleName.empty() ? NULL : _A2W(ruleName.c_str()), NULL, SPRS_ACTIVE );
		if(FAILED(hr)) return xreturn::windowsError(hr);

		hr = this->DictationRecoCtxt->WaitForNotifyEvent(2000); // 2-second timeout
		if(FAILED(hr)) return xreturn::windowsError(hr);

		hr = this->DictationGrammar->SetRuleState(NULL, NULL, SPRS_INACTIVE );
		if(FAILED(hr)) return xreturn::windowsError(hr);

		{
			CSpEvent tempevent;
			hr = tempevent.GetFrom( this->DictationRecoCtxt );
			if(FAILED(hr)) return xreturn::windowsError(hr);

			if (tempevent.eEventId == SPEI_RECOGNITION)
			{// The dictation engine recognized something.
				ISpRecoResult* tempresult;
				{
					tempresult = tempevent.RecoResult();

					// Fetch the dictated text.
					CSpDynamicString tempdstrText;
					hr = tempresult->GetText(SP_GETWHOLEPHRASE, SP_GETWHOLEPHRASE, TRUE, &tempdstrText, NULL);
					if(FAILED(hr)) return xreturn::windowsError(hr);

					SPPHRASE *pPhrase;
					hr = tempresult->GetPhrase(&pPhrase);
					if ( FAILED(hr) ) return xreturn::windowsError(hr);

					double confidence = pPhrase->pElements->SREngineConfidence;
					// GetPhrase allocates the SPPHRASE with CoTaskMemAlloc;
					// free it once the confidence has been read, otherwise we
					// leak one phrase per recognition.
					::CoTaskMemFree(pPhrase);

					std::string ret = _W2A(tempdstrText);
					this->PoolMainWindow->SyncInvokeLog(std::string() + "ディクテーションフィルター :" + ret + " " + num2str(confidence),LOG_LEVEL_DEBUG);

					if (confidence <= 0.60)
					{
						// Confidence too low: reject.
						this->PoolMainWindow->SyncInvokeLog(std::string() + "ディクテーションフィルター棄却",LOG_LEVEL_DEBUG);
						return "";
					}
					return ret;
				}
			}
		}
	}
	// Unknown / nothing recognized.
	return "";
}