本文整理汇总了C++中IAudioClient::Initialize方法的典型用法代码示例。如果您正苦于以下问题:C++ IAudioClient::Initialize方法的具体用法?C++ IAudioClient::Initialize怎么用?C++ IAudioClient::Initialize使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类IAudioClient
的用法示例。
在下文中一共展示了IAudioClient::Initialize方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: BlankAudioPlayback
// Opens a render endpoint (named by lpDevice, or the default console render
// device when lpDevice == TEXT("Default")) and pre-fills a one-second
// shared-mode buffer with silence, then starts the stream so the engine keeps
// "playing" blank audio.  Every failure aborts the process via CrashError, so
// this is startup-time code; it stores the COM interfaces in the member
// pointers mmEnumerator/mmDevice/mmClient/mmRender.
BlankAudioPlayback(CTSTR lpDevice)
{
const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
const IID IID_IAudioClient = __uuidof(IAudioClient);
const IID IID_IAudioRenderClient = __uuidof(IAudioRenderClient);
HRESULT err;
err = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&mmEnumerator);
if(FAILED(err))
    CrashError(TEXT("Could not create IMMDeviceEnumerator: 0x%08lx"), err);
if (scmpi(lpDevice, TEXT("Default")) == 0)
    err = mmEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &mmDevice);
else
    err = mmEnumerator->GetDevice(lpDevice, &mmDevice);
if(FAILED(err))
    CrashError(TEXT("Could not create IMMDevice: 0x%08lx"), err); // fix: report the HRESULT like every other failure path
err = mmDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&mmClient);
if(FAILED(err))
    CrashError(TEXT("Could not create IAudioClient"));
WAVEFORMATEX *pwfx;
err = mmClient->GetMixFormat(&pwfx);
if(FAILED(err))
    CrashError(TEXT("Could not get mix format from audio client"));
// bytes per frame; captured before pwfx is released below
UINT inputBlockSize = pwfx->nBlockAlign;
err = mmClient->Initialize(AUDCLNT_SHAREMODE_SHARED, 0, ConvertMSTo100NanoSec(1000), 0, pwfx, NULL);
CoTaskMemFree(pwfx); // fix: GetMixFormat output is caller-owned and was leaked
if(FAILED(err))
    CrashError(TEXT("Could not initialize audio client, error = %08lX"), err);
err = mmClient->GetService(IID_IAudioRenderClient, (void**)&mmRender);
if(FAILED(err))
    CrashError(TEXT("Could not get audio render client"));
//----------------------------------------------------------------
// Pre-fill the whole render buffer with zeroes (silence).
UINT bufferFrameCount;
err = mmClient->GetBufferSize(&bufferFrameCount);
if(FAILED(err))
    CrashError(TEXT("Could not get audio buffer size"));
BYTE *lpData;
err = mmRender->GetBuffer(bufferFrameCount, &lpData);
if(FAILED(err))
    CrashError(TEXT("Could not get audio buffer"));
zero(lpData, bufferFrameCount*inputBlockSize);
mmRender->ReleaseBuffer(bufferFrameCount, 0);//AUDCLNT_BUFFERFLAGS_SILENT); //probably better if it doesn't know
if(FAILED(mmClient->Start()))
    CrashError(TEXT("Could not start audio source"));
}
示例2: initialize_patch
// Vtable-patch replacement for IAudioClient::Initialize used by the audio
// router: initializes the real (proxy) client for this stream, then every
// duplicate proxy so the same stream plays on additional devices.
// Signature must match IAudioClient::Initialize exactly (__stdcall, same
// parameter order).  Returns the primary proxy's HRESULT; duplicate failures
// are reported via tell_error but do not change the return value.
HRESULT __stdcall initialize_patch(
IAudioClient* this_, AUDCLNT_SHAREMODE ShareMode, DWORD StreamFlags,
REFERENCE_TIME hnsBufferDuration, REFERENCE_TIME hnsPeriodicity,
const WAVEFORMATEX* pFormat, LPCGUID AudioSessionGuid)
{
// synchronize initializing so it doesn't happen while streams are being flushed
HANDLE audio_router_mutex = OpenMutexW(SYNCHRONIZE, FALSE, L"Local\\audio-router-mutex");
assert(audio_router_mutex != NULL);
if(audio_router_mutex)
{
    DWORD res = WaitForSingleObject(audio_router_mutex, INFINITE);
    assert(res == WAIT_OBJECT_0);
}
IAudioClient* proxy = get_duplicate(this_)->proxy;
// NOTE(review): slot [0][17] is assumed to be where the wrapper object stores
// the session GUID (AudioSessionGuid is deliberately ignored) -- verify
// against the wrapper's layout if it ever changes.
LPCGUID guid = ((GUID***)this_)[0][17];
DWORD_PTR* old_vftptr = swap_vtable(this_);
HRESULT hr = proxy->Initialize(
    ShareMode,
    StreamFlags |
    AUDCLNT_SESSIONFLAGS_EXPIREWHENUNOWNED |
    AUDCLNT_SESSIONFLAGS_DISPLAY_HIDEWHENEXPIRED,
    hnsBufferDuration,
    hnsPeriodicity,
    pFormat,
    guid);
// restore the original (patched) vtable pointer
((DWORD_PTR**)this_)[0] = old_vftptr;
if(hr != S_OK)
    tell_error(hr);
else
    *((WORD***)this_)[0][18] = pFormat->nBlockAlign; // cache frame size in wrapper slot [0][18]
if(hr == S_OK)
{
    // Duplicates get the auto-convert/SRC flags so they accept the primary
    // stream's format even if their device mix format differs.
    for(iaudioclient_duplicate* next = get_duplicate(this_)->next;
        next != NULL; next = next->next)
    {
        HRESULT hr2 = next->proxy->Initialize(
            ShareMode,
            StreamFlags |
            AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM |
            AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY |
            AUDCLNT_SESSIONFLAGS_EXPIREWHENUNOWNED |
            AUDCLNT_SESSIONFLAGS_DISPLAY_HIDEWHENEXPIRED,
            hnsBufferDuration,
            hnsPeriodicity,
            pFormat,
            guid);
        if(hr2 != S_OK)
            tell_error(hr2);
    }
}
// fix: only release/close the mutex if OpenMutexW actually succeeded;
// previously ReleaseMutex(NULL)/CloseHandle(NULL) were called on failure.
if(audio_router_mutex)
{
    ReleaseMutex(audio_router_mutex);
    CloseHandle(audio_router_mutex);
}
return hr;
}
示例3: InitializeAudioEngine
//
// Initialize WASAPI in event driven mode, associate the audio client with our samples ready event handle, retrieve
// a capture client for the transport, create the capture thread and start the audio engine.
//
bool CWASAPICapture::InitializeAudioEngine()
{
HRESULT hr = _AudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_NOPERSIST, _EngineLatencyInMS*10000, 0, MixFormat(), NULL);
PersistentAssert(SUCCEEDED(hr), "_AudioClient->Initialize failed");
//
// Retrieve the buffer size for the audio client.
//
hr = _AudioClient->GetBufferSize(&_BufferSize);
PersistentAssert(SUCCEEDED(hr), "_AudioClient->GetBufferSize failed");
hr = _AudioClient->GetService(IID_PPV_ARGS(&_CaptureClient));
PersistentAssert(SUCCEEDED(hr), "_AudioClient->GetService failed");
return true;
}
示例4: Initialize
// Set up WASAPI capture for either a microphone (bMic == true) or desktop
// audio.  lpID is the endpoint ID string, or TEXT("Default") for the default
// device.  Returns false (after AppWarning) on any COM failure.
// NOTE: this excerpt is truncated in the source; the tail of the function
// (format validation, client Initialize, etc.) is not visible here.
bool MMDeviceAudioSource::Initialize(bool bMic, CTSTR lpID)
{
const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
const IID IID_IAudioClient = __uuidof(IAudioClient);
const IID IID_IAudioCaptureClient = __uuidof(IAudioCaptureClient);
HRESULT err;
err = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&mmEnumerator);
if(FAILED(err))
{
AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not create IMMDeviceEnumerator = %08lX"), (BOOL)bMic, err);
return false;
}
bIsMic = bMic;
if (bIsMic) {
// Mic sync fix hack tightens the timestamp "anger" threshold (40 vs 1000).
BOOL bMicSyncFixHack = GlobalConfig->GetInt(TEXT("Audio"), TEXT("UseMicSyncFixHack"));
angerThreshold = bMicSyncFixHack ? 40 : 1000;
}
// Mic captures use the communications capture endpoint; desktop uses the
// console render endpoint (for loopback).
if (scmpi(lpID, TEXT("Default")) == 0)
err = mmEnumerator->GetDefaultAudioEndpoint(bMic ? eCapture : eRender, bMic ? eCommunications : eConsole, &mmDevice);
else
err = mmEnumerator->GetDevice(lpID, &mmDevice);
if(FAILED(err))
{
AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not create IMMDevice = %08lX"), (BOOL)bMic, err);
return false;
}
err = mmDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&mmClient);
if(FAILED(err))
{
AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not create IAudioClient = %08lX"), (BOOL)bMic, err);
return false;
}
//-----------------------------------------------------------------
// get name
// Friendly device name is best-effort: failures are silently ignored.
IPropertyStore *store;
if(SUCCEEDED(mmDevice->OpenPropertyStore(STGM_READ, &store)))
{
PROPVARIANT varName;
PropVariantInit(&varName);
if(SUCCEEDED(store->GetValue(PKEY_Device_FriendlyName, &varName)))
{
CWSTR wstrName = varName.pwszVal;
strDeviceName = wstrName;
}
store->Release();
}
if(bMic)
{
Log(TEXT("------------------------------------------"));
Log(TEXT("Using auxilary audio input: %s"), GetDeviceName());
bUseQPC = GlobalConfig->GetInt(TEXT("Audio"), TEXT("UseMicQPC")) != 0;
if (bUseQPC)
Log(TEXT("Using Mic QPC timestamps"));
}
else
{
Log(TEXT("------------------------------------------"));
Log(TEXT("Using desktop audio input: %s"), GetDeviceName());
bUseVideoTime = AppConfig->GetInt(TEXT("Audio"), TEXT("SyncToVideoTime")) != 0;
SetTimeOffset(GlobalConfig->GetInt(TEXT("Audio"), TEXT("GlobalAudioTimeAdjust")));
}
//-----------------------------------------------------------------
// get format
WAVEFORMATEX *pwfx;
err = mmClient->GetMixFormat(&pwfx);
if(FAILED(err))
{
AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not get mix format from audio client = %08lX"), (BOOL)bMic, err);
return false;
}
bool bFloat;
UINT inputChannels;
UINT inputSamplesPerSec;
UINT inputBitsPerSample;
UINT inputBlockSize;
DWORD inputChannelMask = 0;
WAVEFORMATEXTENSIBLE *wfext = NULL;
//the internal audio engine should always use floats (or so I read), but I suppose just to be safe better check
if(pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE)
{
wfext = (WAVEFORMATEXTENSIBLE*)pwfx;
inputChannelMask = wfext->dwChannelMask;
//......... remainder of this example omitted in the source excerpt .........
示例5: PlayAudio
// Render-loop sample: plays audio produced by LoadAudioBuffer on the default
// render endpoint in shared mode, refilling roughly every half buffer until
// the source signals AUDCLNT_BUFFERFLAGS_SILENT.
// NOTE: this excerpt is truncated; EXIT_ON_ERROR presumably jumps to a
// cleanup label in the omitted tail -- macro definition not visible here.
void PlayAudio()
{
REFERENCE_TIME hnsRequestedDuration = REFTIMES_PER_SEC; // microseconds, so this is 1 seconds
REFERENCE_TIME hnsActualDuration;
HRESULT hr;
IMMDeviceEnumerator *pEnumerator = NULL;
IMMDevice *pDevice = NULL;
IAudioClient *pAudioClient = NULL;
IAudioRenderClient *pRenderClient = NULL;
WAVEFORMATEX *pwfx = NULL;
UINT32 bufferFrameCount;
UINT32 numFramesAvailable;
UINT32 numFramesPadding;
BYTE *pData;
DWORD flags = 0;
hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&pEnumerator);
EXIT_ON_ERROR(hr);
hr = pEnumerator->GetDefaultAudioEndpoint(
eRender, eConsole, &pDevice);
EXIT_ON_ERROR(hr);
hr = pDevice->Activate(
IID_IAudioClient, CLSCTX_ALL,
NULL, (void**)&pAudioClient);
EXIT_ON_ERROR(hr);
hr = pAudioClient->GetMixFormat(&pwfx);
EXIT_ON_ERROR(hr);
// Shared mode, timer-driven (no event callback), 1-second requested buffer.
hr = pAudioClient->Initialize(
AUDCLNT_SHAREMODE_SHARED,
0,
hnsRequestedDuration,
0,
pwfx,
NULL);
EXIT_ON_ERROR(hr);
// Get the actual size of the allocated buffer.
hr = pAudioClient->GetBufferSize(&bufferFrameCount);
EXIT_ON_ERROR(hr);
hr = pAudioClient->GetService(
IID_IAudioRenderClient,
(void**)&pRenderClient);
EXIT_ON_ERROR(hr);
// Grab the entire buffer for the initial fill operation.
hr = pRenderClient->GetBuffer(bufferFrameCount, &pData);
EXIT_ON_ERROR(hr);
// load initial data
hr = LoadAudioBuffer(bufferFrameCount, pData, pwfx, &flags);
EXIT_ON_ERROR(hr);
hr = pRenderClient->ReleaseBuffer(bufferFrameCount, flags);
EXIT_ON_ERROR(hr);
// Calculate the actual duration of the allocated buffer.
hnsActualDuration = (REFERENCE_TIME)((double)REFTIMES_PER_SEC * bufferFrameCount / pwfx->nSamplesPerSec);
hr = pAudioClient->Start(); // Start playing.
EXIT_ON_ERROR(hr);
// Each loop fills about half of the shared buffer.
while (flags != AUDCLNT_BUFFERFLAGS_SILENT)
{
// Sleep for half the buffer duration.
Sleep((DWORD)(hnsActualDuration/REFTIMES_PER_MILLISEC/2));
// See how much buffer space is available.
hr = pAudioClient->GetCurrentPadding(&numFramesPadding);
EXIT_ON_ERROR(hr)
numFramesAvailable = bufferFrameCount - numFramesPadding;
// Grab all the available space in the shared buffer.
hr = pRenderClient->GetBuffer(numFramesAvailable, &pData);
EXIT_ON_ERROR(hr)
// Get next 1/2-second of data from the audio source.
hr = LoadAudioBuffer(numFramesAvailable, pData, pwfx, &flags);
EXIT_ON_ERROR(hr)
hr = pRenderClient->ReleaseBuffer(numFramesAvailable, flags);
EXIT_ON_ERROR(hr)
}
// Wait for last data in buffer to play before stopping.
Sleep((DWORD)(hnsActualDuration/REFTIMES_PER_MILLISEC/2));
hr = pAudioClient->Stop(); // Stop playing.
EXIT_ON_ERROR(hr);
//......... remainder of this example (cleanup/Exit label) omitted in the source excerpt .........
示例6: Initialize
// Earlier variant of MMDeviceAudioSource::Initialize: sets up WASAPI capture
// for a mic (by endpoint ID) or desktop loopback (default render endpoint),
// requiring a float mix format, and acquires capture + clock services.
// NOTE: this excerpt is truncated; the resampler setup after the 44100 Hz
// check is not visible here.
bool MMDeviceAudioSource::Initialize(bool bMic, CTSTR lpID)
{
const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
const IID IID_IAudioClient = __uuidof(IAudioClient);
const IID IID_IAudioCaptureClient = __uuidof(IAudioCaptureClient);
HRESULT err;
err = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&mmEnumerator);
if(FAILED(err))
{
AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not create IMMDeviceEnumerator = %08lX"), (BOOL)bMic, err);
return false;
}
// Mic uses the caller-supplied endpoint ID; desktop always uses the default
// console render endpoint (captured via loopback below).
if(bMic)
err = mmEnumerator->GetDevice(lpID, &mmDevice);
else
err = mmEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &mmDevice);
if(FAILED(err))
{
AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not create IMMDevice = %08lX"), (BOOL)bMic, err);
return false;
}
err = mmDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&mmClient);
if(FAILED(err))
{
AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not create IAudioClient = %08lX"), (BOOL)bMic, err);
return false;
}
WAVEFORMATEX *pwfx;
err = mmClient->GetMixFormat(&pwfx);
if(FAILED(err))
{
AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not get mix format from audio client = %08lX"), (BOOL)bMic, err);
return false;
}
String strName = GetDeviceName();
if(bMic)
{
Log(TEXT("------------------------------------------"));
Log(TEXT("Using auxilary audio input: %s"), strName.Array());
}
//the internal audio engine should always use floats (or so I read), but I suppose just to be safe better check
if(pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE)
{
WAVEFORMATEXTENSIBLE *wfext = (WAVEFORMATEXTENSIBLE*)pwfx;
inputChannelMask = wfext->dwChannelMask;
if(wfext->SubFormat != KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)
{
AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Unsupported wave format"), (BOOL)bMic);
return false;
}
}
else if(pwfx->wFormatTag != WAVE_FORMAT_IEEE_FLOAT)
{
AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Unsupported wave format"), (BOOL)bMic);
return false;
}
// Cache the input format parameters (32-bit float samples per the checks above).
inputChannels = pwfx->nChannels;
inputBitsPerSample = 32;
inputBlockSize = pwfx->nBlockAlign;
inputSamplesPerSec = pwfx->nSamplesPerSec;
// Loopback flag turns the render endpoint into a capture source.
DWORD flags = bMic ? 0 : AUDCLNT_STREAMFLAGS_LOOPBACK;
err = mmClient->Initialize(AUDCLNT_SHAREMODE_SHARED, flags, ConvertMSTo100NanoSec(5000), 0, pwfx, NULL);
if(FAILED(err))
{
AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not initialize audio client, result = %08lX"), (BOOL)bMic, err);
return false;
}
err = mmClient->GetService(IID_IAudioCaptureClient, (void**)&mmCapture);
if(FAILED(err))
{
AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not get audio capture client, result = %08lX"), (BOOL)bMic, err);
return false;
}
// IAudioClock is used for timestamping captured data.
err = mmClient->GetService(__uuidof(IAudioClock), (void**)&mmClock);
if(FAILED(err))
{
AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not get audio capture clock, result = %08lX"), (BOOL)bMic, err);
return false;
}
CoTaskMemFree(pwfx);
//-------------------------------------------------------------------------
// Non-44.1kHz input triggers resampler setup (in the omitted tail).
if(inputSamplesPerSec != 44100)
{
int errVal;
//......... remainder of this example omitted in the source excerpt .........
示例7: LoopbackCaptureFor
// Captures loopback audio from mmDevice into a WAV file for (up to) secs
// seconds, coercing the mix format to 16-bit PCM in place.
// NOTE: this excerpt is truncated; the capture loop, MMCSS registration tail,
// and cleanup are not visible here.
void LoopbackCaptureFor(IMMDevice* mmDevice, std::string filename, int secs)
{
// open new file
MMIOINFO mi = { 0 };
// some flags cause mmioOpen write to this buffer
// but not any that we're using
std::wstring wsFilename(filename.begin(), filename.end()); // mmioOpen wants a wstring
// NOTE(review): mmioOpen result (file) is not checked for NULL before use.
HMMIO file = mmioOpen(const_cast<LPWSTR>(wsFilename.c_str()), &mi, MMIO_WRITE | MMIO_CREATE);
time_t startTime = time(nullptr);
// activate an IAudioClient
IAudioClient* audioClient;
HRESULT hr = mmDevice->Activate(__uuidof(IAudioClient), CLSCTX_ALL, nullptr, (void**)&audioClient);
if (FAILED(hr))
{
fprintf(stderr, "IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
return;
}
// get the default device periodicity
REFERENCE_TIME hnsDefaultDevicePeriod;
hr = audioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, nullptr);
if (FAILED(hr))
{
fprintf(stderr, "IAudioClient::GetDevicePeriod failed: hr = 0x%08x\n", hr);
audioClient->Release();
return;
}
// get the default device format
WAVEFORMATEX* waveform;
hr = audioClient->GetMixFormat(&waveform);
if (FAILED(hr))
{
fprintf(stderr, "IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
CoTaskMemFree(waveform);
audioClient->Release();
return;
}
// coerce int-16 wave format
// can do this in-place since we're not changing the size of the format
// also, the engine will auto-convert from float to int for us
switch (waveform->wFormatTag)
{
case WAVE_FORMAT_IEEE_FLOAT:
waveform->wFormatTag = WAVE_FORMAT_PCM;
waveform->wBitsPerSample = BITS_PER_SAMPLE;
waveform->nBlockAlign = BLOCK_ALIGN;
waveform->nAvgBytesPerSec = BYTE_RATE;
break;
case WAVE_FORMAT_EXTENSIBLE:
{
// naked scope for case-local variable
PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(waveform);
if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat))
{
pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
pEx->Samples.wValidBitsPerSample = BITS_PER_SAMPLE;
waveform->wBitsPerSample = BITS_PER_SAMPLE;
waveform->nBlockAlign = waveform->nChannels * BYTE_PER_SAMPLE;
waveform->nAvgBytesPerSec = waveform->nBlockAlign * waveform->nSamplesPerSec;
}
break;
}
}
MMCKINFO ckRIFF = { 0 };
MMCKINFO ckData = { 0 };
// NOTE(review): hr from WriteWaveHeader is not checked before continuing.
hr = WriteWaveHeader(file, waveform, &ckRIFF, &ckData);
// create a periodic waitable timer
HANDLE hWakeUp = CreateWaitableTimer(nullptr, FALSE, nullptr);
UINT32 nBlockAlign = waveform->nBlockAlign;
// call IAudioClient::Initialize
// note that AUDCLNT_STREAMFLAGS_LOOPBACK and AUDCLNT_STREAMFLAGS_EVENTCALLBACK
// do not work together...
// the "data ready" event never gets set
// so we're going to do a timer-driven loop
hr = audioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK, 0, 0, waveform, 0);
if (FAILED(hr))
{
fprintf(stderr, "IAudioClient::Initialize failed: hr = 0x%08x\n", hr);
CloseHandle(hWakeUp);
audioClient->Release();
return;
}
// free up waveform
CoTaskMemFree(waveform);
// activate an IAudioCaptureClient
IAudioCaptureClient* audioCaptureClient;
hr = audioClient->GetService(__uuidof(IAudioCaptureClient), (void**)&audioCaptureClient);
// register with MMCSS
DWORD nTaskIndex = 0;
//......... remainder of this example omitted in the source excerpt .........
示例8: __uuidof
//......... beginning of this example omitted in the source excerpt .........
// Fragment of a loopback-capture routine: finishes format coercion, creates a
// waitable timer, and initializes a timer-driven loopback capture client.
// Truncated at both ends -- the enclosing function signature and the capture
// loop are not visible here.
break;
default:
break;
}
// Publish the (possibly coerced) format parameters to the shared buffer.
pBuffer->SetAudioInfo(pwfx->nSamplesPerSec,pwfx->nBlockAlign,pwfx->nChannels,pwfx->wBitsPerSample,isFloat);
}
if (FAILED(hr)) {
// WriteWaveHeader does its own logging
CoTaskMemFree(pwfx);
pAudioClient->Release();
return hr;
}
// create a periodic waitable timer
HANDLE hWakeUp = CreateWaitableTimer(NULL, FALSE, NULL);
if (NULL == hWakeUp) {
DWORD dwErr = GetLastError();
printf("CreateWaitableTimer failed: last error = %u\n", dwErr);
CoTaskMemFree(pwfx);
pAudioClient->Release();
return HRESULT_FROM_WIN32(dwErr);
}
UINT32 nBlockAlign = pwfx->nBlockAlign;
UINT32 nChannels = pwfx->nChannels;
nFrames = 0;
// call IAudioClient::Initialize
// note that AUDCLNT_STREAMFLAGS_LOOPBACK and AUDCLNT_STREAMFLAGS_EVENTCALLBACK
// do not work together...
// the "data ready" event never gets set
// so we're going to do a timer-driven loop
hr = pAudioClient->Initialize(
AUDCLNT_SHAREMODE_SHARED,
AUDCLNT_STREAMFLAGS_LOOPBACK,
0, 0, pwfx, 0
);
if (FAILED(hr)) {
printf("IAudioClient::Initialize failed: hr = 0x%08x\n", hr);
CloseHandle(hWakeUp);
pAudioClient->Release();
return hr;
}
CoTaskMemFree(pwfx);
// activate an IAudioCaptureClient
IAudioCaptureClient *pAudioCaptureClient;
hr = pAudioClient->GetService(
__uuidof(IAudioCaptureClient),
(void**)&pAudioCaptureClient
);
if (FAILED(hr)) {
printf("IAudioClient::GetService(IAudioCaptureClient) failed: hr 0x%08x\n", hr);
CloseHandle(hWakeUp);
pAudioClient->Release();
return hr;
}
// register with MMCSS
DWORD nTaskIndex = 0;
HANDLE hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex);
if (NULL == hTask) {
DWORD dwErr = GetLastError();
printf("AvSetMmThreadCharacteristics failed: last error = %u\n", dwErr);
pAudioCaptureClient->Release();
// Console sample: opens the default render device, coerces the mix format to
// 16-bit PCM, initializes a loopback capture client, and starts capturing.
// NOTE: this excerpt is truncated; the capture loop and cleanup are not
// visible here.
int main(int argc, char *argv[])
{
CoInitialize(nullptr);
listDevices();
IAudioClient *pAudioClient;
IMMDevice *device;
getDefaultDevice(&device);
HRESULT hr = device->Activate(__uuidof(IAudioClient),
CLSCTX_ALL, nullptr, (void**)&pAudioClient);
if (FAILED(hr)) {
printf("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
return hr;
}
REFERENCE_TIME hnsDefaultDevicePeriod;
hr = pAudioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, nullptr);
if (FAILED(hr)) {
printf("IAudioClient::GetDevicePeriod failed: hr = 0x%08x\n", hr);
pAudioClient->Release();
return hr;
}
// get the default device format
WAVEFORMATEX *pwfx;
hr = pAudioClient->GetMixFormat(&pwfx);
if (FAILED(hr)) {
printf("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
CoTaskMemFree(pwfx);
pAudioClient->Release();
return hr;
}
DVAR(pwfx->wFormatTag);
DVAR(pwfx->wBitsPerSample);
DVAR(pwfx->nBlockAlign);
DVAR(pwfx->nAvgBytesPerSec);
// Coerce the mix format to 16-bit PCM in place; the engine auto-converts
// float to int for us.
switch (pwfx->wFormatTag) {
case WAVE_FORMAT_IEEE_FLOAT:
pwfx->wFormatTag = WAVE_FORMAT_PCM;
pwfx->wBitsPerSample = 16;
pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
break;
case WAVE_FORMAT_EXTENSIBLE:
{
// naked scope for case-local variable
PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)) {
pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
pEx->Samples.wValidBitsPerSample = 16;
pwfx->wBitsPerSample = 16;
pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
} else {
printf("Don't know how to coerce mix format to int-16\n");
CoTaskMemFree(pwfx);
pAudioClient->Release();
return E_UNEXPECTED;
}
}
break;
default:
printf("Don't know how to coerce WAVEFORMATEX with wFormatTag = 0x%08x to int-16\n", pwfx->wFormatTag);
CoTaskMemFree(pwfx);
pAudioClient->Release();
return E_UNEXPECTED;
}
DVAR(pwfx->wFormatTag);
DVAR(pwfx->wBitsPerSample);
DVAR(pwfx->nBlockAlign);
DVAR(pwfx->nAvgBytesPerSec);
// Shared-mode loopback, timer/polling driven (no event callback).
hr = pAudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK, 0, 0, pwfx, 0 );
if (FAILED(hr)) {
printf("IAudioClient::Initialize failed: hr = 0x%08x\n", hr);
pAudioClient->Release();
return hr;
}
IAudioCaptureClient *pAudioCaptureClient;
hr = pAudioClient->GetService(__uuidof(IAudioCaptureClient), (void**)&pAudioCaptureClient);
if (FAILED(hr)) {
printf("IAudioClient::GetService(IAudioCaptureClient) failed: hr 0x%08x\n", hr);
pAudioClient->Release();
return hr;
}
hr = pAudioClient->Start();
if (FAILED(hr)) {
printf("IAudioClient::Start failed: hr = 0x%08x\n", hr);
pAudioCaptureClient->Release();
//......... remainder of this example omitted in the source excerpt .........
示例10: _tmain
// Sample entry point: opens the default render endpoint, coerces the mix
// format to 16-bit PCM, and (in the omitted tail) captures loopback audio to
// c:/1.wav.  Includes commented-out plumbing for piping audio through a
// child plink process.
// NOTE: this excerpt is truncated; the capture loop and cleanup are missing.
int _tmain(int argc, _TCHAR* argv[]) {
IMMDeviceEnumerator *enumerator = 0;
IMMDevice *device = 0;
FILE *f;
// NOTE(review): opened in text mode ("w"); a .wav is binary, so "wb" is
// presumably intended -- confirm against the omitted write code.
f=fopen("c:/1.wav","w");
CoInitialize(0);
CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator),
(void**) &enumerator);
enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device);
HANDLE processOutWrite, processOutRead, processInWrite, processInRead;
/*wchar_t processCommand[2000];
{
FILE* commandFile;
fopen_s(&commandFile, "command.txt", "r");
char cmd[2000];
fread(cmd, sizeof(char), 2000, commandFile);
fclose(commandFile);
size_t count;
mbstowcs_s(&count, processCommand, cmd, 2000);
}*/
/*{
//create pipes for plink process
SECURITY_ATTRIBUTES pipeAttributes = {0};
pipeAttributes.nLength = sizeof(SECURITY_ATTRIBUTES);
pipeAttributes.bInheritHandle = TRUE;
pipeAttributes.lpSecurityDescriptor= NULL;
CreatePipe(&processOutRead, &processOutWrite, &pipeAttributes, 0);
CreatePipe(&processInRead, &processInWrite, &pipeAttributes, 0);
STARTUPINFO startupInfo;
ZeroMemory(&startupInfo, sizeof(STARTUPINFO));
startupInfo.cb = sizeof(STARTUPINFO);
startupInfo.hStdError = processOutWrite;
startupInfo.hStdOutput = processOutWrite;
startupInfo.hStdInput = processInRead;
startupInfo.dwFlags |= STARTF_USESTDHANDLES;
PROCESS_INFORMATION processInfo = {0};
//launch process
CreateProcess(NULL, processCommand, NULL, NULL, TRUE, 0, NULL, NULL, &startupInfo, &processInfo);
//wait for plink to connect to minimze sound delay (magic number)
Sleep(2500);
}*/
HRESULT hr;
// activate an IAudioClient
IAudioClient *audioClient;
hr = device->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, (void**) &audioClient);
if (FAILED(hr)) {
printf("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
return hr;
}
// get the default device format
WAVEFORMATEX *waveFormat;
hr = audioClient->GetMixFormat(&waveFormat);
if (FAILED(hr)) {
printf("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
CoTaskMemFree(waveFormat);
audioClient->Release();
return hr;
}
// coerce int-16 wave format
// can do this in-place since we're not changing the size of the format
// also, the engine will auto-convert from float to int for us
switch (waveFormat->wFormatTag) {
case WAVE_FORMAT_IEEE_FLOAT:
waveFormat->wFormatTag = WAVE_FORMAT_PCM;
waveFormat->wBitsPerSample = 16;
waveFormat->nBlockAlign = waveFormat->nChannels * waveFormat->wBitsPerSample / 8;
waveFormat->nAvgBytesPerSec = waveFormat->nBlockAlign * waveFormat->nSamplesPerSec;
break;
case WAVE_FORMAT_EXTENSIBLE:
{
// naked scope for case-local variable
PWAVEFORMATEXTENSIBLE waveFormatEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(waveFormat);
if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, waveFormatEx->SubFormat)) {
waveFormatEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
waveFormatEx->Samples.wValidBitsPerSample = 16;
waveFormat->wBitsPerSample = 16;
waveFormat->nBlockAlign = waveFormat->nChannels * waveFormat->wBitsPerSample / 8;
waveFormat->nAvgBytesPerSec = waveFormat->nBlockAlign * waveFormat->nSamplesPerSec;
} else {
printf("Don't know how to coerce mix format to int-16\n");
CoTaskMemFree(waveFormat);
audioClient->Release();
return E_UNEXPECTED;
}
}
break;
default:
printf("Don't know how to coerce WAVEFORMATEX with wFormatTag = 0x%08x to int-16\n", waveFormat->wFormatTag);
CoTaskMemFree(waveFormat);
audioClient->Release();
//......... remainder of this example omitted in the source excerpt .........
示例11: LoopbackCapture
//.........这里部分代码省略.........
pAudioClient->Release();
return E_UNEXPECTED;
}
}
break;
default:
printf("Don't know how to coerce WAVEFORMATEX with wFormatTag = 0x%08x to int-16\n", pwfx->wFormatTag);
CoTaskMemFree(pwfx);
pAudioClient->Release();
return E_UNEXPECTED;
}
}
// create a periodic waitable timer
HANDLE hWakeUp = CreateWaitableTimer(NULL, FALSE, NULL);
if (NULL == hWakeUp) {
DWORD dwErr = GetLastError();
printf("CreateWaitableTimer failed: last error = %u\n", dwErr);
CoTaskMemFree(pwfx);
pAudioClient->Release();
return HRESULT_FROM_WIN32(dwErr);
}
UINT32 nBlockAlign = pwfx->nBlockAlign;
UINT32 nBufferSize;
*pnFrames = 0;
// call IAudioClient::Initialize
// note that AUDCLNT_STREAMFLAGS_LOOPBACK and AUDCLNT_STREAMFLAGS_EVENTCALLBACK
// do not work together...
// the "data ready" event never gets set
// so we're going to do a timer-driven loop
hr = pAudioClient->Initialize(
AUDCLNT_SHAREMODE_SHARED,
AUDCLNT_STREAMFLAGS_LOOPBACK,
0, 0, pwfx, 0
);
if (FAILED(hr)) {
printf("IAudioClient::Initialize failed: hr = 0x%08x\n", hr);
CloseHandle(hWakeUp);
pAudioClient->Release();
return hr;
}
CoTaskMemFree(pwfx);
// Get the buffer size
hr = pAudioClient->GetBufferSize(&nBufferSize);
if (FAILED(hr)) {
printf("IAudioClient::GetBufferSize failed: hr = 0x%08x\n", hr);
CloseHandle(hWakeUp);
pAudioClient->Release();
return hr;
}
// Configure the server. The buffer size returned is in frames
// so assume stereo, 16 bits per sample to convert from frames to bytes
server.configure(
bMono,
iSampleRateDivisor,
nBufferSize * 2 * 2);
// activate an IAudioCaptureClient
IAudioCaptureClient *pAudioCaptureClient;
hr = pAudioClient->GetService(
__uuidof(IAudioCaptureClient),