This article collects typical usage examples of the C++ IAudioClient class. If you have been wondering what IAudioClient is for, how to use it, or where to find working examples, the selected class code examples below may help.
The following section presents 15 code examples of the IAudioClient class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
Example 1: BlankAudioPlayback
BlankAudioPlayback(CTSTR lpDevice)
{
    const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
    const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
    const IID IID_IAudioClient = __uuidof(IAudioClient);
    const IID IID_IAudioRenderClient = __uuidof(IAudioRenderClient);
    HRESULT err;
    err = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&mmEnumerator);
    if(FAILED(err))
        CrashError(TEXT("Could not create IMMDeviceEnumerator: 0x%08lx"), err);
    if (scmpi(lpDevice, TEXT("Default")) == 0)
        err = mmEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &mmDevice);
    else
        err = mmEnumerator->GetDevice(lpDevice, &mmDevice);
    if(FAILED(err))
        CrashError(TEXT("Could not create IMMDevice"));
    err = mmDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&mmClient);
    if(FAILED(err))
        CrashError(TEXT("Could not create IAudioClient"));
    WAVEFORMATEX *pwfx;
    err = mmClient->GetMixFormat(&pwfx);
    if(FAILED(err))
        CrashError(TEXT("Could not get mix format from audio client"));
    UINT inputBlockSize = pwfx->nBlockAlign;
    err = mmClient->Initialize(AUDCLNT_SHAREMODE_SHARED, 0, ConvertMSTo100NanoSec(1000), 0, pwfx, NULL);
    if(FAILED(err))
        CrashError(TEXT("Could not initialize audio client, error = %08lX"), err);
    err = mmClient->GetService(IID_IAudioRenderClient, (void**)&mmRender);
    if(FAILED(err))
        CrashError(TEXT("Could not get audio render client"));
    //----------------------------------------------------------------
    UINT bufferFrameCount;
    err = mmClient->GetBufferSize(&bufferFrameCount);
    if(FAILED(err))
        CrashError(TEXT("Could not get audio buffer size"));
    BYTE *lpData;
    err = mmRender->GetBuffer(bufferFrameCount, &lpData);
    if(FAILED(err))
        CrashError(TEXT("Could not get audio buffer"));
    zero(lpData, bufferFrameCount*inputBlockSize);
    mmRender->ReleaseBuffer(bufferFrameCount, 0);//AUDCLNT_BUFFERFLAGS_SILENT); //probably better if it doesn't know
    if(FAILED(mmClient->Start()))
        CrashError(TEXT("Could not start audio source"));
}
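Example 1 acquires the enumerator, device, audio client, and render client but never releases them, and the WAVEFORMATEX returned by GetMixFormat is never freed with CoTaskMemFree. A minimal cleanup sketch is shown below as a hypothetical destructor; it reuses the member names from the example above and is not part of the original source.

BlankAudioPlayback::~BlankAudioPlayback()
{
    // stop the stream before tearing the COM objects down
    if(mmClient)     mmClient->Stop();
    if(mmRender)     mmRender->Release();
    if(mmClient)     mmClient->Release();
    if(mmDevice)     mmDevice->Release();
    if(mmEnumerator) mmEnumerator->Release();
}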
Example 2: initialize_patch
HRESULT __stdcall initialize_patch(
    IAudioClient* this_, AUDCLNT_SHAREMODE ShareMode, DWORD StreamFlags,
    REFERENCE_TIME hnsBufferDuration, REFERENCE_TIME hnsPeriodicity,
    const WAVEFORMATEX* pFormat, LPCGUID AudioSessionGuid)
{
    // synchronize initializing so it doesn't happen while streams are being flushed
    HANDLE audio_router_mutex = OpenMutexW(SYNCHRONIZE, FALSE, L"Local\\audio-router-mutex");
    assert(audio_router_mutex != NULL);
    if(audio_router_mutex)
    {
        DWORD res = WaitForSingleObject(audio_router_mutex, INFINITE);
        assert(res == WAIT_OBJECT_0);
    }
    IAudioClient* proxy = get_duplicate(this_)->proxy;
    LPCGUID guid = ((GUID***)this_)[0][17];
    DWORD_PTR* old_vftptr = swap_vtable(this_);
    HRESULT hr = proxy->Initialize(
        ShareMode,
        StreamFlags |
            AUDCLNT_SESSIONFLAGS_EXPIREWHENUNOWNED |
            AUDCLNT_SESSIONFLAGS_DISPLAY_HIDEWHENEXPIRED,
        hnsBufferDuration,
        hnsPeriodicity,
        pFormat,
        guid);
    ((DWORD_PTR**)this_)[0] = old_vftptr;
    if(hr != S_OK)
        tell_error(hr);
    else
        *((WORD***)this_)[0][18] = pFormat->nBlockAlign;
    if(hr == S_OK)
    {
        for(iaudioclient_duplicate* next = get_duplicate(this_)->next;
            next != NULL; next = next->next)
        {
            HRESULT hr2 = next->proxy->Initialize(
                ShareMode,
                StreamFlags |
                    AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM |
                    AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY |
                    AUDCLNT_SESSIONFLAGS_EXPIREWHENUNOWNED |
                    AUDCLNT_SESSIONFLAGS_DISPLAY_HIDEWHENEXPIRED,
                hnsBufferDuration,
                hnsPeriodicity,
                pFormat,
                guid);
            if(hr2 != S_OK)
                tell_error(hr2);
        }
    }
    ReleaseMutex(audio_router_mutex);
    CloseHandle(audio_router_mutex);
    return hr;
}
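The patch functions in Examples 2 through 12 all rely on two helpers that are not shown in this listing, get_duplicate() and swap_vtable(). The sketch below is only an assumption about their general shape, not the project's actual code: a COM object's first pointer-sized field is its vtable pointer, so temporarily restoring the original vtable before forwarding a call prevents the forwarded call from re-entering the hook.

// Hypothetical sketch of a vtable-swap helper; names and storage are assumptions.
static DWORD_PTR* g_original_vtable; // captured when the hook was first installed

DWORD_PTR* swap_vtable(IAudioClient* this_)
{
    DWORD_PTR** object = (DWORD_PTR**)this_;
    DWORD_PTR* patched = object[0];   // the hooked vtable currently installed
    object[0] = g_original_vtable;    // restore the original vtable for the forwarded call
    return patched;                   // caller puts this back before returning
}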
Example 3: getmixformat_patch
HRESULT __stdcall getmixformat_patch(IAudioClient* this_, WAVEFORMATEX** ppDeviceFormat)
{
    // STATIC FUNCTION
    IAudioClient* proxy = get_duplicate(this_)->proxy;
    DWORD_PTR* old_vftptr = swap_vtable(this_);
    HRESULT hr = proxy->GetMixFormat(ppDeviceFormat);
    ((DWORD_PTR**)this_)[0] = old_vftptr;
    return hr;
}
Example 4: reset_patch
HRESULT __stdcall reset_patch(IAudioClient* this_)
{
    IAudioClient* proxy = get_duplicate(this_)->proxy;
    DWORD_PTR* old_vftptr = swap_vtable(this_);
    HRESULT hr = proxy->Reset();
    ((DWORD_PTR**)this_)[0] = old_vftptr;
    for(iaudioclient_duplicate* next = get_duplicate(this_)->next; next != NULL; next = next->next)
        next->proxy->Reset();
    return hr;
}
Example 5: seteventhandle_patch
HRESULT __stdcall seteventhandle_patch(IAudioClient* this_, HANDLE eventHandle)
{
    IAudioClient* proxy = get_duplicate(this_)->proxy;
    for(iaudioclient_duplicate* next = get_duplicate(this_)->next; next != NULL; next = next->next)
        next->proxy->SetEventHandle(eventHandle);
    DWORD_PTR* old_vftptr = swap_vtable(this_);
    HRESULT hr = proxy->SetEventHandle(eventHandle);
    ((DWORD_PTR**)this_)[0] = old_vftptr;
    return hr;
}
Example 6: isformatsupported_patch
HRESULT __stdcall isformatsupported_patch(
    IAudioClient* this_, AUDCLNT_SHAREMODE ShareMode,
    const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch)
{
    // STATIC FUNCTION
    IAudioClient* proxy = get_duplicate(this_)->proxy;
    DWORD_PTR* old_vftptr = swap_vtable(this_);
    HRESULT hr = proxy->IsFormatSupported(ShareMode, pFormat, ppClosestMatch);
    ((DWORD_PTR**)this_)[0] = old_vftptr;
    return hr;
}
Example 7: start_patch
HRESULT __stdcall start_patch(IAudioClient* this_)
{
    IAudioClient* proxy = get_duplicate(this_)->proxy;
    DWORD_PTR* old_vftptr = swap_vtable(this_);
    HRESULT hr = proxy->Start(), hr2;
    ((DWORD_PTR**)this_)[0] = old_vftptr;
    if(hr == S_OK)
    {
        for(iaudioclient_duplicate* next = get_duplicate(this_)->next; next != NULL; next = next->next)
            hr2 = next->proxy->Start();
    }
    return hr;
}
Example 8: getbuffersize_patch
HRESULT __stdcall getbuffersize_patch(IAudioClient* this_, UINT32* pNumBufferFrames)
{
    IAudioClient* proxy = get_duplicate(this_)->proxy;
    DWORD_PTR* old_vftptr = swap_vtable(this_);
    HRESULT hr = proxy->GetBufferSize(pNumBufferFrames);
    ((DWORD_PTR**)this_)[0] = old_vftptr;
    for(iaudioclient_duplicate* next = get_duplicate(this_)->next; next != NULL; next = next->next)
    {
        UINT32 buf;
        HRESULT hr = next->proxy->GetBufferSize(&buf);
        assert(buf >= *pNumBufferFrames);
    }
    return hr;
}
Example 9: getstreamlatency_patch
HRESULT __stdcall getstreamlatency_patch(IAudioClient* this_, REFERENCE_TIME* phnsLatency)
{
    IAudioClient* proxy = get_duplicate(this_)->proxy;
    DWORD_PTR* old_vftptr = swap_vtable(this_);
    HRESULT hr = proxy->GetStreamLatency(phnsLatency);
    ((DWORD_PTR**)this_)[0] = old_vftptr;
    for(iaudioclient_duplicate* next = get_duplicate(this_)->next; next != NULL; next = next->next)
    {
        REFERENCE_TIME t;
        next->proxy->GetStreamLatency(&t);
        assert(*phnsLatency == t);
    }
    return hr;
}
Example 10: getcurrentpadding_patch
HRESULT __stdcall getcurrentpadding_patch(IAudioClient* this_, UINT32* pNumPaddingFrames)
{
    IAudioClient* proxy = get_duplicate(this_)->proxy;
    DWORD_PTR* old_vftptr = swap_vtable(this_);
    HRESULT hr = proxy->GetCurrentPadding(pNumPaddingFrames);
    ((DWORD_PTR**)this_)[0] = old_vftptr;
    for(iaudioclient_duplicate* next = get_duplicate(this_)->next; next != NULL; next = next->next)
    {
        UINT32 pad;
        next->proxy->GetCurrentPadding(&pad);
        //assert(pad == *pNumPaddingFrames);
    }
    return hr;
}
Example 11: getdeviceperiod_patch
HRESULT __stdcall getdeviceperiod_patch(
    IAudioClient* this_,
    REFERENCE_TIME* phnsDefaultDevicePeriod, REFERENCE_TIME* phnsMinimumDevicePeriod)
{
    // STATIC FUNCTION
    IAudioClient* proxy = get_duplicate(this_)->proxy;
    DWORD_PTR* old_vftptr = swap_vtable(this_);
    HRESULT hr = proxy->GetDevicePeriod(phnsDefaultDevicePeriod, phnsMinimumDevicePeriod);
    ((DWORD_PTR**)this_)[0] = old_vftptr;
    for(iaudioclient_duplicate* next = get_duplicate(this_)->next; next != NULL; next = next->next)
    {
        REFERENCE_TIME def, min;
        next->proxy->GetDevicePeriod(&def, &min);
        assert(def == *phnsDefaultDevicePeriod && min == *phnsMinimumDevicePeriod);
    }
    return hr;
}
Example 12: getservice_patch
HRESULT __stdcall getservice_patch(IAudioClient* this_, REFIID riid, void** ppv)
{
    IAudioClient* proxy = get_duplicate(this_)->proxy;
    DWORD_PTR* old_vftptr = swap_vtable(this_);
    HRESULT hr = proxy->GetService(riid, ppv);
    ((DWORD_PTR**)this_)[0] = old_vftptr;
    // renderclient list has 1:1 mapping to audioclient
    if(hr == S_OK)
    {
        if(riid == __uuidof(IAudioRenderClient))
        {
            IAudioRenderClient* host = *((IAudioRenderClient**)ppv);
            patch_iaudiorenderclient(host, *((WORD***)this_)[0][18]);
            for(iaudioclient_duplicate* next = get_duplicate(this_)->next;
                next != NULL; next = next->next)
            {
                IAudioRenderClient* renderclient = NULL;
                next->proxy->GetService(riid, (void**)&renderclient);
                get_duplicate(host)->add(renderclient);
            }
        }
        else if(riid == __uuidof(IAudioStreamVolume))
        {
            IAudioStreamVolume* host = *((IAudioStreamVolume**)ppv);
            patch_iaudiostreamvolume(host);
            for(iaudioclient_duplicate* next = get_duplicate(this_)->next;
                next != NULL; next = next->next)
            {
                IAudioStreamVolume* streamvolume = NULL;
                next->proxy->GetService(riid, (void**)&streamvolume);
                if(streamvolume != NULL)
                    get_duplicate(host)->add(streamvolume);
            }
        }
    }
    return hr;
}
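For comparison, this is roughly what an ordinary, unpatched GetService call for IAudioStreamVolume looks like from application code; the patch above intercepts exactly this path and mirrors the returned interface onto each duplicated client. The snippet is a sketch and assumes that client is an already-initialized IAudioClient.

// Sketch only; `client` is assumed to be an initialized IAudioClient.
IAudioStreamVolume* volume = NULL;
HRESULT hr = client->GetService(__uuidof(IAudioStreamVolume), (void**)&volume);
if(SUCCEEDED(hr))
{
    UINT32 channels = 0;
    volume->GetChannelCount(&channels);
    for(UINT32 i = 0; i < channels; i++)
        volume->SetChannelVolume(i, 0.5f);  // half volume on every channel
    volume->Release();
}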
Example 13: propagateWithRawCurrentFormat
void propagateWithRawCurrentFormat(WAVEFORMATEX *toThis) {
    WAVEFORMATEX *pwfx;
    IMMDevice *pMMDevice;
    IAudioClient *pAudioClient;
    HANDLE hTask;
    DWORD nTaskIndex = 0;
    hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex);
    HRESULT hr = get_default_device(&pMMDevice);
    if (FAILED(hr)) {
        assert(false);
    }
    // activate an IAudioClient (on the default device, since we want loopback)
    hr = pMMDevice->Activate(
        __uuidof(IAudioClient),
        CLSCTX_ALL, NULL,
        (void**)&pAudioClient
    );
    if (FAILED(hr)) {
        ShowOutput("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
        assert(false);
    }
    hr = pAudioClient->GetMixFormat(&pwfx);
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        assert(false);
    }
    pAudioClient->Stop();
    AvRevertMmThreadCharacteristics(hTask);
    pAudioClient->Release();
    pMMDevice->Release();
    memcpy(toThis, pwfx, sizeof(WAVEFORMATEX));
    CoTaskMemFree(pwfx);
}
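Note that Example 13 copies only sizeof(WAVEFORMATEX) bytes into the caller's buffer, which drops the cbSize trailing bytes when the mix format is WAVEFORMATEXTENSIBLE, as it usually is in shared mode. If the destination buffer is known to be large enough (an assumption the original signature does not guarantee), a full copy would be a sketch like this:

// Sketch: copy the base structure plus its cbSize extension, assuming `toThis`
// points to a buffer of at least sizeof(WAVEFORMATEX) + pwfx->cbSize bytes.
memcpy(toThis, pwfx, sizeof(WAVEFORMATEX) + pwfx->cbSize);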
Example 14: __uuidof
//HRESULT LoopbackCapture(
//    IMMDevice *pMMDevice,
//    bool bInt16,
//    HANDLE hStartedEvent,
//    HANDLE hStopEvent,
//    PUINT32 pnFrames,
//    HMMIO hFile,
//    AudioBuffer *pBuffer
//)
HRESULT LoopbackCapture::Process()
{
    HRESULT hr;
    // activate an IAudioClient
    IAudioClient *pAudioClient;
    hr = pMMDevice->Activate(
        __uuidof(IAudioClient),
        CLSCTX_ALL, NULL,
        (void**)&pAudioClient
    );
    if (FAILED(hr)) {
        printf("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
        return hr;
    }
    // get the default device periodicity
    REFERENCE_TIME hnsDefaultDevicePeriod;
    hr = pAudioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, NULL);
    if (FAILED(hr)) {
        printf("IAudioClient::GetDevicePeriod failed: hr = 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }
    // get the default device format
    WAVEFORMATEX *pwfx;
    hr = pAudioClient->GetMixFormat(&pwfx);
    if (FAILED(hr)) {
        printf("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        return hr;
    }
    if (pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE)
    {
        PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
        //pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
        printf("WAVE_FORMAT_EXTENSIBLE\n");
        if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat))
        {
            printf("float\n");
        }
        else if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_PCM, pEx->SubFormat))
        {
            printf("PCM\n");
        }
        else if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_WAVEFORMATEX, pEx->SubFormat))
        {
            printf("WAVEFORMATEX\n");
        }
    }
    if (bInt16) {
        // coerce int-16 wave format
        // can do this in-place since we're not changing the size of the format
        // also, the engine will auto-convert from float to int for us
        switch (pwfx->wFormatTag) {
        case WAVE_FORMAT_IEEE_FLOAT:
            pwfx->wFormatTag = WAVE_FORMAT_PCM;
            pwfx->wBitsPerSample = 16;
            pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
            pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
            break;
        case WAVE_FORMAT_EXTENSIBLE:
        {
            // naked scope for case-local variable
            PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
            if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)) {
                pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
                pEx->Samples.wValidBitsPerSample = 16;
                pwfx->wBitsPerSample = 16;
                pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
                pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
            } else {
                printf("Don't know how to coerce mix format to int-16\n");
                CoTaskMemFree(pwfx);
                pAudioClient->Release();
                return E_UNEXPECTED;
            }
        }
        break;
        default:
            printf("Don't know how to coerce WAVEFORMATEX with wFormatTag = 0x%08x to int-16\n", pwfx->wFormatTag);
            CoTaskMemFree(pwfx);
            pAudioClient->Release();
            return E_UNEXPECTED;
        }
//......... (the remainder of this example is omitted here) .........
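The listing above is cut off by the aggregator. For orientation only, and not as the omitted code, a typical WASAPI loopback capture continues roughly as follows once the format has been coerced: initialize the client in loopback mode, obtain an IAudioCaptureClient, start the stream, and drain packets in a loop. The variable names below mirror the example, but the code is a sketch.

// Sketch only -- NOT the omitted portion of LoopbackCapture::Process().
IAudioCaptureClient *pCaptureClient = NULL;
hr = pAudioClient->Initialize(
    AUDCLNT_SHAREMODE_SHARED,
    AUDCLNT_STREAMFLAGS_LOOPBACK,   // capture what the render device is playing
    0, 0, pwfx, NULL);
if (SUCCEEDED(hr))
    hr = pAudioClient->GetService(__uuidof(IAudioCaptureClient), (void**)&pCaptureClient);
if (SUCCEEDED(hr))
    hr = pAudioClient->Start();
while (SUCCEEDED(hr) /* && no stop request */) {
    UINT32 nNextPacketSize = 0;
    hr = pCaptureClient->GetNextPacketSize(&nNextPacketSize);
    if (FAILED(hr)) break;
    if (nNextPacketSize == 0) { Sleep(1); continue; }   // nothing to read yet
    BYTE *pData = NULL;
    UINT32 nFrames = 0;
    DWORD dwFlags = 0;
    hr = pCaptureClient->GetBuffer(&pData, &nFrames, &dwFlags, NULL, NULL);
    if (FAILED(hr)) break;
    // ... consume nFrames * pwfx->nBlockAlign bytes from pData ...
    pCaptureClient->ReleaseBuffer(nFrames);
}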
Example 15: InitializeAudioEngine
//
// Initialize WASAPI in event driven mode, associate the audio client with our samples ready event handle, retrieve
// a capture client for the transport, create the capture thread and start the audio engine.
//
bool CWASAPICapture::InitializeAudioEngine()
{
    HRESULT hr = _AudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_NOPERSIST, _EngineLatencyInMS*10000, 0, MixFormat(), NULL);
    PersistentAssert(SUCCEEDED(hr), "_AudioClient->Initialize failed");
    //
    // Retrieve the buffer size for the audio client.
    //
    hr = _AudioClient->GetBufferSize(&_BufferSize);
    PersistentAssert(SUCCEEDED(hr), "_AudioClient->GetBufferSize failed");
    hr = _AudioClient->GetService(IID_PPV_ARGS(&_CaptureClient));
    PersistentAssert(SUCCEEDED(hr), "_AudioClient->GetService failed");
    return true;
}
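The leading comment mentions event-driven mode, but the Initialize call shown here passes only AUDCLNT_STREAMFLAGS_NOPERSIST and never calls SetEventHandle. A sketch of what the event-driven variant would look like is given below; it assumes a member event handle such as _AudioSamplesReadyEvent created elsewhere with CreateEvent (that name is an assumption, not part of the listing).

// Sketch of event-driven initialization; _AudioSamplesReadyEvent is assumed.
HRESULT hr = _AudioClient->Initialize(
    AUDCLNT_SHAREMODE_SHARED,
    AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST,
    _EngineLatencyInMS * 10000,   // REFERENCE_TIME is expressed in 100-ns units
    0,
    MixFormat(),
    NULL);
if (SUCCEEDED(hr))
    hr = _AudioClient->SetEventHandle(_AudioSamplesReadyEvent);   // required with EVENTCALLBACK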