This article collects typical usage examples of the C++ method IAMStreamConfig::GetFormat. If you are wrestling with questions like: what exactly does C++ IAMStreamConfig::GetFormat do, how is it called, and what does real-world usage look like, the curated code examples below may help. You can also explore further usage examples of its containing class, IAMStreamConfig.
The following shows 15 code examples of IAMStreamConfig::GetFormat, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
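Before the examples, here is a minimal sketch of the canonical GetFormat pattern that all of them build on. Assumptions: pPin is the output pin of a capture filter, and DeleteMediaType is the standard SDK helper (a sketch of it follows Example 1). GetFormat allocates an AM_MEDIA_TYPE that the caller must free:
IAMStreamConfig *pCfg = NULL;
HRESULT hr = pPin->QueryInterface(IID_IAMStreamConfig, (void **)&pCfg);
if (SUCCEEDED(hr))
{
    AM_MEDIA_TYPE *pmt = NULL;
    hr = pCfg->GetFormat(&pmt);        // current (or default) format of the pin
    if (SUCCEEDED(hr))
    {
        if (pmt->formattype == FORMAT_VideoInfo)
        {
            VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER *)pmt->pbFormat;
            // inspect or adjust pVih, then optionally call pCfg->SetFormat(pmt)
        }
        DeleteMediaType(pmt);          // the caller owns the returned media type
    }
    pCfg->Release();
}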
Example 1: SetCaptureBufferSize
void CCaptureDevice::SetCaptureBufferSize(void)
{
IPin * pCapturePin = GetPin();
if (pCapturePin)
{
DWORD dwBytesPerSec = 0;
AM_MEDIA_TYPE * pmt = NULL;
IAMStreamConfig * pCfg = NULL;
HRESULT hr = pCapturePin->QueryInterface(IID_IAMStreamConfig, (void **)&pCfg);
if ( hr==S_OK )
{
hr = pCfg->GetFormat(&pmt);
if ( hr==S_OK )
{
WAVEFORMATEX *pWF = (WAVEFORMATEX *) pmt->pbFormat;
dwBytesPerSec = pWF->nAvgBytesPerSec;
pWF->nChannels = 1;
pWF->wBitsPerSample = 8;
pWF->nSamplesPerSec = 11025;
pWF->nAvgBytesPerSec = pWF->nSamplesPerSec * pWF->nChannels * pWF->wBitsPerSample / 8;
pWF->nBlockAlign = 1;
/*
info.cbSize = sizeof(WAVEFORMATEX);
info.wFormatTag = 1;
info.nChannels = 2;
info.nSamplesPerSec = 44100;
//info.nSamplesPerSec = 22050;
//info.nSamplesPerSec = 11025;
info.wBitsPerSample = 16;
info.nAvgBytesPerSec = info.nSamplesPerSec * info.nChannels * info.wBitsPerSample / 8;
info.nBlockAlign = 4;
*/
pCfg->SetFormat( pmt );
DeleteMediaType(pmt);
}
pCfg->Release();
}
/* if (dwBytesPerSec)
{
IAMBufferNegotiation * pNeg = NULL;
hr = pCapturePin->QueryInterface(IID_IAMBufferNegotiation,
(void **)&pNeg);
if (SUCCEEDED(hr))
{
ALLOCATOR_PROPERTIES AllocProp;
AllocProp.cbAlign = -1; // -1 means no preference.
AllocProp.cbBuffer = dwBytesPerSec * dwLatencyInMilliseconds / 1000;
AllocProp.cbPrefix = -1;
AllocProp.cBuffers = -1;
hr = pNeg->SuggestAllocatorProperties(&AllocProp);
pNeg->Release();
}
}*/
}
}
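Several examples call DeleteMediaType (or a local equivalent such as BorrarTipoMedio or UtilDeleteMediaType) without showing its definition. It is the standard DirectShow base-class helper; if your project does not link the base classes, the usual implementation looks like this sketch:
void FreeMediaType(AM_MEDIA_TYPE& mt)
{
    if (mt.cbFormat != 0)
    {
        CoTaskMemFree((PVOID)mt.pbFormat); // free the format block
        mt.cbFormat = 0;
        mt.pbFormat = NULL;
    }
    if (mt.pUnk != NULL)
    {
        mt.pUnk->Release(); // pUnk should not be used, but release it if set
        mt.pUnk = NULL;
    }
}
void DeleteMediaType(AM_MEDIA_TYPE *pmt)
{
    if (pmt != NULL)
    {
        FreeMediaType(*pmt);  // free the embedded format block first
        CoTaskMemFree(pmt);   // then the structure itself
    }
}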
Example 2: IniciarVentanaVideo
HRESULT Captura::IniciarVentanaVideo(HWND hWnd,int width, int height)
{
HRESULT hr;
RECT rcDest;
// CComPtr<IAMStreamConfig> pConfig;
IAMStreamConfig * pConfig;
IEnumMediaTypes *pMedia;
AM_MEDIA_TYPE *pmt = NULL, *pfnt = NULL;
hr = m_pCamOutPin->EnumMediaTypes( &pMedia );
if(SUCCEEDED(hr))
{
while(pMedia->Next(1, &pmt, 0) == S_OK)
{
if( pmt->formattype == FORMAT_VideoInfo )
{
VIDEOINFOHEADER *vih = (VIDEOINFOHEADER *)pmt->pbFormat;
if( vih->bmiHeader.biWidth == width && vih->bmiHeader.biHeight == height )
{
pfnt = pmt;
break;
}
}
BorrarTipoMedio( pmt ); // free every enumerated type we do not keep (non-video types would otherwise leak)
}
pMedia->Release();
}
hr = m_pCamOutPin->QueryInterface( IID_IAMStreamConfig, (void **) &pConfig );
if(SUCCEEDED(hr))
{
if( pfnt != NULL )
{
hr=pConfig->SetFormat( pfnt );
BorrarTipoMedio( pfnt );
}
hr = pConfig->GetFormat( &pfnt );
if(SUCCEEDED(hr))
{
m_nAncho = ((VIDEOINFOHEADER *)pfnt->pbFormat)->bmiHeader.biWidth;
m_nAlto = ((VIDEOINFOHEADER *)pfnt->pbFormat)->bmiHeader.biHeight;
BorrarTipoMedio( pfnt );
}
}
::GetClientRect (hWnd,&rcDest);
hr = m_pWC->SetVideoPosition(NULL, &rcDest);
return hr;
}
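Example 2's m_pWC is presumably an IVMRWindowlessControl (the VMR in windowless mode; a VMR9 build would use the IVMRWindowlessControl9 equivalents). A hedged sketch of how such a pointer is typically obtained, assuming a VMR filter pVMR has already been added to the graph:
IVMRFilterConfig *pVMRCfg = NULL;
IVMRWindowlessControl *pWC = NULL;
HRESULT hr = pVMR->QueryInterface(IID_IVMRFilterConfig, (void **)&pVMRCfg);
if (SUCCEEDED(hr))
{
    pVMRCfg->SetRenderingMode(VMRMode_Windowless); // must precede IVMRWindowlessControl use
    pVMRCfg->Release();
}
hr = pVMR->QueryInterface(IID_IVMRWindowlessControl, (void **)&pWC);
if (SUCCEEDED(hr))
    pWC->SetVideoClippingWindow(hWnd); // the window that SetVideoPosition positions within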
Example 3: InitVideoWindow
/* Set the format of captured video frames: enumerate all formats looking for the requested one; if it is absent, capture with the default format */
HRESULT CVMR_Capture::InitVideoWindow(HWND hWnd,int width, int height)
{
HRESULT hr;
RECT rcDest;
IAMStreamConfig *pConfig;
IEnumMediaTypes *pMedia;
AM_MEDIA_TYPE *pmt = NULL, *pfnt = NULL;
hr = m_pCamOutPin->EnumMediaTypes( &pMedia );
if(SUCCEEDED(hr))
{
//Walk through every format the device offers, checking for the requested one
while(pMedia->Next(1, &pmt, 0) == S_OK)
{
if( pmt->formattype == FORMAT_VideoInfo )
{
VIDEOINFOHEADER *vih = (VIDEOINFOHEADER *)pmt->pbFormat;
// Does this format match the requested one, i.e. same width and height?
if( vih->bmiHeader.biWidth == width && vih->bmiHeader.biHeight == height )
{
pfnt = pmt;
break;
}
}
DeleteMediaType( pmt ); // free every enumerated type we do not keep
}
pMedia->Release();
}
hr = m_pCamOutPin->QueryInterface( IID_IAMStreamConfig, (void **) &pConfig );
if(SUCCEEDED(hr))
{
// The requested format was found
if( pfnt != NULL )
{
hr=pConfig->SetFormat( pfnt );
DeleteMediaType( pfnt );
}
// Read back the format actually in effect (the default one if no match was found)
hr = pConfig->GetFormat( &pfnt );
if(SUCCEEDED(hr))
{
m_nWidth = ((VIDEOINFOHEADER *)pfnt->pbFormat)->bmiHeader.biWidth; //read the width
m_nHeight = ((VIDEOINFOHEADER *)pfnt->pbFormat)->bmiHeader.biHeight; //read the height
DeleteMediaType( pfnt );
}
}
// Get the client area of the target window to position the video display
::GetClientRect (hWnd,&rcDest);
hr = m_pWC->SetVideoPosition(NULL, &rcDest);
return hr;
}
Example 4: CoCreateInstance
/******************************Public*Routine******************************\
* BuildAndRender
*
* Capture-card source
\**************************************************************************/
HRESULT CVMR9Subgraph::BuildAndRender(IBaseFilter* pCap , GUID VidType ,UINT Width,UINT Height,int nFPS,IMultiVMR9Wizard* pWizard ,BOOL bUsingColorSpace,SourceConnectProc ConnectProc)
{
HRESULT hr = S_OK;
if(!pWizard)return E_FAIL;
if(!pCap)return E_FAIL;
if(m_pGraph) return E_FAIL;
IVMRFilterConfig9 * pConfig = NULL;
IGraphBuilder * pGb= NULL;
IBaseFilter * pColorSpace = 0;
IBaseFilter * pVMR9 = 0;
ICaptureGraphBuilder2 * pBuild = NULL;
//IBaseFilter* pCap = NULL;
IPin * pPin = NULL;
IAMStreamConfig *pStrCfig = 0;
AM_MEDIA_TYPE * mmt = 0;
//m_DeviceId = DeviceId;
m_GraphType = Capture_Device;
m_ConnectProc = ConnectProc;
CMediaHelper Helper;
// create graph
try
{
hr = CoCreateInstance( CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
IID_IFilterGraph, (void**)&(m_pGraph) );
if( FAILED(hr)) throw hr;
if(!m_pGraph) throw E_OUTOFMEMORY;
//--
hr = m_pGraph->QueryInterface( IID_IGraphBuilder, (void**)&(pGb) );
if( FAILED(hr)) throw hr;
if(!pGb) throw E_OUTOFMEMORY;
//--
hr = CoCreateInstance(CLSID_CaptureGraphBuilder2, NULL, CLSCTX_INPROC, IID_ICaptureGraphBuilder2, (void **)&pBuild);
if( FAILED(hr)) throw hr;
if(!pBuild) throw E_OUTOFMEMORY;
//--
hr =pBuild->SetFiltergraph(pGb);
if( FAILED(hr))throw hr;
//--
// create and add VMR9
hr = CoCreateInstance( CLSID_VideoMixingRenderer9, NULL, CLSCTX_INPROC,
IID_IBaseFilter, (void**)&(pVMR9) );
if( FAILED(hr)) throw hr;
if(!pVMR9) throw E_OUTOFMEMORY;
//--
hr = m_pGraph->AddFilter( pVMR9, L"VMR9");
if( FAILED(hr))throw hr;
//--
hr = pVMR9->QueryInterface( IID_IVMRFilterConfig9, (void**)&(pConfig) );
if( FAILED(hr))throw hr;
//--
// set VMR to the renderless mode
hr = pConfig->SetRenderingMode( VMR9Mode_Renderless );
//--
hr = pWizard->Attach( pVMR9,D3DFMT_UNKNOWN, &m_dwID );
if( FAILED(hr))throw hr;
if(bUsingColorSpace){
hr = CoCreateInstance( CLSID_Colour, NULL, CLSCTX_INPROC,
IID_IBaseFilter, (void**)&(pColorSpace) );
if( FAILED(hr)) throw hr;
if(!pColorSpace) throw E_OUTOFMEMORY;
//--
hr = m_pGraph->AddFilter( pColorSpace, L"ColorSpace");
if( FAILED(hr))throw hr;
}
////Connect the capture device
//hr = Helper.GetVidCapDevice(DeviceId,&pCap);
//if( FAILED(hr))throw hr;
//if(!pCap)throw E_OUTOFMEMORY;
hr = m_pGraph->AddFilter(pCap,L"Capture");
if( FAILED(hr))throw hr;
if(m_ConnectProc) {
hr = m_ConnectProc(m_dwID,m_pGraph,pCap,pVMR9);
}
else{ //use the default connection method
hr = pBuild->FindInterface(&PIN_CATEGORY_CAPTURE,&MEDIATYPE_Interleaved,pCap,IID_IAMStreamConfig,(void **)&pStrCfig);
if( FAILED( hr) ){
hr = pBuild->FindInterface(&PIN_CATEGORY_CAPTURE,&MEDIATYPE_Video,pCap,IID_IAMStreamConfig,(void **)&pStrCfig);
if (FAILED(hr ))throw hr;
}
hr = pStrCfig->GetFormat(&mmt);
if (FAILED(hr)) throw hr;
if (!mmt) throw E_OUTOFMEMORY;
if (mmt->formattype == FORMAT_VideoInfo){
VIDEOINFO *pvi = (VIDEOINFO *) mmt->pbFormat;
pvi->AvgTimePerFrame = UNITS/nFPS;
pvi->bmiHeader.biWidth = Width;
pvi->bmiHeader.biHeight = Height;
mmt->subtype = VidType;
// hr=pStrCfig->SetFormat(mmt); //re-apply the adjusted format
if( FAILED(hr)) throw hr;
}
//......... part of the code omitted here .........
Example 5: OpenCamera
bool CCameraDS::OpenCamera(int nCamID, bool bDisplayProperties, int nWidth, int nHeight)
{
HRESULT hr = S_OK;
CoInitialize(NULL);
// Create the Filter Graph Manager.
hr = CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC,
IID_IGraphBuilder, (void **)&m_pGraph);
hr = CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER,
IID_IBaseFilter, (LPVOID *)&m_pSampleGrabberFilter);
hr = m_pGraph->QueryInterface(IID_IMediaControl, (void **) &m_pMediaControl);
hr = m_pGraph->QueryInterface(IID_IMediaEvent, (void **) &m_pMediaEvent);
hr = CoCreateInstance(CLSID_NullRenderer, NULL, CLSCTX_INPROC_SERVER,
IID_IBaseFilter, (LPVOID*) &m_pNullFilter);
hr = m_pGraph->AddFilter(m_pNullFilter, L"NullRenderer");
hr = m_pSampleGrabberFilter->QueryInterface(IID_ISampleGrabber, (void**)&m_pSampleGrabber);
AM_MEDIA_TYPE mt;
ZeroMemory(&mt, sizeof(AM_MEDIA_TYPE));
mt.majortype = MEDIATYPE_Video;
mt.subtype = MEDIASUBTYPE_RGB24;
mt.formattype = FORMAT_VideoInfo;
hr = m_pSampleGrabber->SetMediaType(&mt);
MYFREEMEDIATYPE(mt);
m_pGraph->AddFilter(m_pSampleGrabberFilter, L"Grabber");
// Bind Device Filter. We know the device because the id was passed in
BindFilter(nCamID, &m_pDeviceFilter);
m_pGraph->AddFilter(m_pDeviceFilter, NULL);
CComPtr<IEnumPins> pEnum;
m_pDeviceFilter->EnumPins(&pEnum);
hr = pEnum->Reset();
hr = pEnum->Next(1, &m_pCameraOutput, NULL);
pEnum = NULL;
m_pSampleGrabberFilter->EnumPins(&pEnum);
pEnum->Reset();
hr = pEnum->Next(1, &m_pGrabberInput, NULL);
pEnum = NULL;
m_pSampleGrabberFilter->EnumPins(&pEnum);
pEnum->Reset();
pEnum->Skip(1);
hr = pEnum->Next(1, &m_pGrabberOutput, NULL);
pEnum = NULL;
m_pNullFilter->EnumPins(&pEnum);
pEnum->Reset();
hr = pEnum->Next(1, &m_pNullInputPin, NULL);
//SetCrossBar();
if (bDisplayProperties)
{
CComPtr<ISpecifyPropertyPages> pPages;
HRESULT hr = m_pCameraOutput->QueryInterface(IID_ISpecifyPropertyPages, (void**)&pPages);
if (SUCCEEDED(hr))
{
PIN_INFO PinInfo;
m_pCameraOutput->QueryPinInfo(&PinInfo);
CAUUID caGUID;
pPages->GetPages(&caGUID);
OleCreatePropertyFrame(NULL, 0, 0,
L"Property Sheet", 1,
(IUnknown **)&(m_pCameraOutput.p),
caGUID.cElems,
caGUID.pElems,
0, 0, NULL);
CoTaskMemFree(caGUID.pElems);
PinInfo.pFilter->Release();
}
pPages = NULL;
}
else
{
//////////////////////////////////////////////////////////////////////////////
// Add support for setting the camera width and height via nWidth and nHeight (default 320*240)
// by flymanbox @2009-01-24
//////////////////////////////////////////////////////////////////////////////
int _Width = nWidth, _Height = nHeight;
IAMStreamConfig* iconfig = NULL;
hr = m_pCameraOutput->QueryInterface(IID_IAMStreamConfig, (void**)&iconfig);
AM_MEDIA_TYPE* pmt;
if(iconfig->GetFormat(&pmt) !=S_OK)
{
//......... part of the code omitted here .........
Example 6: LoadFilters
//......... part of the code omitted here .........
}
}
int soundTimeOffset = data->GetInt(TEXT("soundTimeOffset"));
//GetOutputList(devicePin, outputList);
//------------------------------------------------
// initialize the basic video variables and data
//------------------------------------------------
// log audio info
{
String strTest;
strTest = FormattedString(TEXT(" audio device: %s,\r\n audio device id %s,\r\n"), strAudioDevice.Array(), strAudioID.Array());
Log(TEXT("------------------------------------------"));
Log(strTest.Array());
}
//------------------------------------------------
// get audio pin configuration, optionally configure audio pin to 44100
GUID expectedAudioType;
if(soundOutputType == 1)
{
IAMStreamConfig *audioConfig;
if(SUCCEEDED(audioPin->QueryInterface(IID_IAMStreamConfig, (void**)&audioConfig)))
{
AM_MEDIA_TYPE *audioMediaType;
if(SUCCEEDED(err = audioConfig->GetFormat(&audioMediaType)))
{
SetAudioInfo(audioMediaType, expectedAudioType);
//Log(TEXT("Fixed size samples: %s\r\n"), (audioMediaType->bFixedSizeSamples) ? "Yes" : "No");
//Log(TEXT("Temporal Compression: %s\r\n"), (audioMediaType->bTemporalCompression) ? "Yes" : "No");
//Log(TEXT("cbFormat: %.16X\r\n"), audioMediaType->cbFormat);
//Log(TEXT("Sample size: %u\r\n"), audioMediaType->lSampleSize);
}
else if(err == E_NOTIMPL) //elgato probably
{
IEnumMediaTypes *audioMediaTypes;
if(SUCCEEDED(err = audioPin->EnumMediaTypes(&audioMediaTypes)))
{
ULONG i = 0;
if((err = audioMediaTypes->Next(1, &audioMediaType, &i)) == S_OK)
SetAudioInfo(audioMediaType, expectedAudioType);
else
{
AppWarning(TEXT("DShowAudioPlugin: audioMediaTypes->Next failed, result = %08lX"), err);
soundOutputType = 0;
}
audioMediaTypes->Release();
}
else
{
AppWarning(TEXT("DShowAudioPlugin: audioMediaTypes->Next failed, result = %08lX"), err);
soundOutputType = 0;
}
}
else
{
AppWarning(TEXT("DShowAudioPlugin: Could not get audio format, result = %08lX"), err);
Example 7: if
//......... part of the code omitted here .........
{
return -14;
}
hr = s->m_pGraph->AddFilter(s->m_pIDXFilter, L"DXFilter Filter");
if(FAILED(hr))
{
return -15;
}
// get null renderer
hr=CoCreateInstance (CLSID_NullRenderer,
NULL,
CLSCTX_INPROC_SERVER,
IID_IBaseFilter,
(void **)&s->m_pNullRenderer);
if(FAILED(hr))
{
return -16;
}
if (s->m_pNullRenderer!=NULL)
{
s->m_pGraph->AddFilter(s->m_pNullRenderer, L"Null Renderer");
}
hr = s->m_pBuilder->RenderStream(&pPinCategory,
&MEDIATYPE_Video, s->m_pDeviceFilter, s->m_pIDXFilter, s->m_pNullRenderer);
if (FAILED(hr))
{
return -17;
}
IAMStreamConfig *pConfig = NULL;
hr = s->m_pBuilder->FindInterface(
&pPinCategory, // Preview pin.
&MEDIATYPE_Video, // Any media type.
s->m_pDeviceFilter, // Pointer to the capture filter.
IID_IAMStreamConfig, (void**)&pConfig);
if (pConfig!=NULL)
{
AM_MEDIA_TYPE *pType = NULL;
int iCount, iSize;
pConfig->GetNumberOfCapabilities(&iCount, &iSize);
for (int i = 0; i < iCount; i++) {
VIDEO_STREAM_CONFIG_CAPS scc;
pType = NULL;
pConfig->GetStreamCaps(i, &pType, (BYTE *)&scc);
if (!((pType->formattype == FORMAT_VideoInfo) &&
(pType->cbFormat >= sizeof(VIDEOINFOHEADER)) &&
(pType->pbFormat != NULL)))
continue;
VIDEOINFOHEADER & videoInfo = *(VIDEOINFOHEADER *)pType->pbFormat;
if (m != pType->subtype)
continue;
if (videoInfo.bmiHeader.biWidth != s->vsize.width)
continue;
if (videoInfo.bmiHeader.biHeight != s->vsize.height)
continue;
if (videoInfo.bmiHeader.biBitCount != pvi->bmiHeader.biBitCount)
continue;
if (videoInfo.bmiHeader.biCompression != pvi->bmiHeader.biCompression)
continue;
videoInfo.AvgTimePerFrame = UNITS / (LONGLONG)s->fps;
pConfig->SetFormat(pType);
}
pConfig->GetFormat(&pType);
if (pType!=NULL)
{
VIDEOINFO *pvi = (VIDEOINFO *)pType->pbFormat;
ms_message("v4w: camera asked fps=%.2f // real fps=%.2f", s->fps, ((float)UNITS / (float)pvi->AvgTimePerFrame));
}
pConfig->Release();
}
//m_pDXFilter->SetBufferSamples(TRUE);
s_callback = s;
hr = s->m_pControl->Run();
if(FAILED(hr))
{
return -18;
}
s->rotregvalue=1;
return 0;
}
Example 8: SetAudioFormat
/// Configure the audio format
BOOL CAudioCapture::SetAudioFormat(ENUM_FREQUENCY_TYPE enFrequency,
ENUM_CHANNEL_TYPE enChannel, ENUM_SAMPLE_TYPE enSample)
{
if(NULL != m_pCaptureFilter)
{
BOOL bResult = FALSE;
do
{
IPin* pOutPin = GetOutputPin(m_pCaptureFilter, (uint16_t)0);
if(NULL != pOutPin)
{
IAMBufferNegotiation *pNeg = NULL;
IAMStreamConfig *pCfg = NULL;
// Get buffer negotiation interface
HRESULT hr = pOutPin->QueryInterface(IID_IAMBufferNegotiation, (void **)&pNeg);
if (FAILED(hr))
{
pOutPin->Release();
break;
}
// Find number of bytes in one second
long lBytesPerSecond = (long) (enSample * enFrequency * enChannel);
// Adjustment for the FAAC encoder (it consumes 1024-sample frames)
long lBufferSize = 1024 * enSample * enChannel;
// Set the buffer size based on selected settings
ALLOCATOR_PROPERTIES prop={0};
prop.cbBuffer = lBufferSize;
prop.cBuffers = 6;
prop.cbAlign = enSample * enChannel;
hr = pNeg->SuggestAllocatorProperties(&prop);
pNeg->Release();
// Now set the actual format of the audio data
hr = pOutPin->QueryInterface(IID_IAMStreamConfig, (void **)&pCfg);
if (FAILED(hr))
{
pOutPin->Release();
break;
}
// Read current media type/format
AM_MEDIA_TYPE *pmt = NULL;
hr = pCfg->GetFormat(&pmt);
if (SUCCEEDED(hr))
{
// Fill in values for the new format
WAVEFORMATEX *pWF = (WAVEFORMATEX *) pmt->pbFormat;
pWF->nChannels = (WORD) enChannel;
pWF->nSamplesPerSec = enFrequency;
pWF->nAvgBytesPerSec = lBytesPerSecond;
pWF->wBitsPerSample = (WORD) (enSample * 8);
pWF->nBlockAlign = (WORD) (enSample * enChannel);
// Set the new format on the output pin
hr = pCfg->SetFormat(pmt);
UtilDeleteMediaType(pmt);
}
// Release interfaces
pCfg->Release();
pOutPin->Release();
bResult = TRUE;
}
}while(FALSE);
return bResult;
}
else
{
m_enFrequency = enFrequency;
m_enChannel = enChannel;
m_enSample = enSample;
return TRUE;
}
}
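The buffer sizing in Example 8 is worth a worked example. Assuming 44.1 kHz stereo with 2-byte samples (so the enum values are 44100, 2 and 2, as the arithmetic implies), the numbers come out as follows; the factor of 1024 matches the 1024-sample frame the FAAC encoder consumes:
// Hypothetical values; the ENUM_* types are defined elsewhere in that codebase.
long lBytesPerSecond = 2 /*bytes*/ * 44100 /*Hz*/ * 2 /*channels*/; // 176400
long lBufferSize     = 1024 * 2 /*bytes*/ * 2 /*channels*/;         // 4096 = one AAC frame
// So prop.cbBuffer = 4096 and prop.cbAlign = 4 (one stereo 16-bit sample block).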
Example 9: main
int main(int argc, char* argv[])
{
ICaptureGraphBuilder2 *pCaptureGraphBuilder = NULL;
IGraphBuilder *pGraphBuilder = NULL;
IBaseFilter *pSource = NULL;
IBaseFilter *pMux = NULL;
IBaseFilter *pVideoCompressor = NULL;
IBaseFilter *pAudioCompressor = NULL;
IAMStreamConfig *pAMStreamConfig = NULL;
IAMVideoCompression *pAMVideoCompression = NULL;
IMediaControl *pControl = NULL;
IMediaSeeking *pSeek = NULL;
IMediaEvent *pEvent = NULL;
HRESULT hr;
DWORD pdwRegister=0;
CoInitialize(NULL);
// Create the capture graph builder.
CoCreateInstance(CLSID_CaptureGraphBuilder2, NULL, CLSCTX_INPROC,
IID_ICaptureGraphBuilder2, (void **)&pCaptureGraphBuilder);
// Make the rendering section of the graph.
pCaptureGraphBuilder->SetOutputFileName(
&MEDIASUBTYPE_Avi, // File type.
L"C:\\STDIUE1.avi", // File name.
&pMux, // pointer to the multiplexer.
NULL); // pointer to the file writer.
// Load the source file.
pCaptureGraphBuilder->GetFiltergraph(&pGraphBuilder);
pGraphBuilder->AddSourceFilter(L"C:\\Program Files\\Microsoft Money\\Media\\STDIUE1.avi", L"Source Filter", &pSource);
// Add the compressor filter.
CoCreateInstance(CLSID_AVICo, NULL, CLSCTX_INPROC,
IID_IBaseFilter, (void **)&pVideoCompressor);
pGraphBuilder->AddFilter(pVideoCompressor, L"Video Compressor");
// Render the video stream, through the compressor.
pCaptureGraphBuilder->RenderStream(
NULL, // Output pin category
NULL, // Media type
pSource, // Source filter
pVideoCompressor, // Compressor filter
pMux); // Sink filter (the AVI Mux)
/* CoCreateInstance(CLSID_GSM, NULL, CLSCTX_INPROC,
IID_IBaseFilter, (void **)&pAudioCompressor);
pGraphBuilder->AddFilter(pAudioCompressor, L"Audio Compressor");*/
// Render the audio stream (pAudioCompressor is NULL here, so the stream is not compressed).
pCaptureGraphBuilder->RenderStream(
NULL,
NULL,
pSource,
pAudioCompressor,
pMux);
// Compress at 100k/second data rate.
AM_MEDIA_TYPE *pmt;
pCaptureGraphBuilder->FindInterface(NULL, NULL, pVideoCompressor, IID_IAMStreamConfig, (void **)&pAMStreamConfig);
pAMStreamConfig->GetFormat(&pmt);
if (pmt->formattype == FORMAT_VideoInfo)
{
((VIDEOINFOHEADER *)(pmt->pbFormat))->dwBitRate = 100000;
pAMStreamConfig->SetFormat(pmt);
}
// Request key frames every four frames.
pAMStreamConfig->QueryInterface(IID_IAMVideoCompression, (void **)&pAMVideoCompression);
pAMVideoCompression->put_KeyFrameRate(4);
pAMVideoCompression->Release();
pAMStreamConfig->Release();
// Run the graph.
pGraphBuilder->QueryInterface(IID_IMediaControl, (void **)&pControl);
pGraphBuilder->QueryInterface(IID_IMediaEvent, (void **)&pEvent);
hr = pMux->QueryInterface(IID_IMediaSeeking, (void**)&pSeek);
pControl->Run();
printf("Recompressing... \n");
long evCode;
if (SUCCEEDED(hr))
{
REFERENCE_TIME rtTotal, rtNow = 0;
pSeek->GetDuration(&rtTotal);
while ((pEvent->WaitForCompletion(1000, &evCode)) == E_ABORT)
{
//......... part of the code omitted here .........
Example 10: LoadFilters
//......... part of the code omitted here .........
AM_MEDIA_TYPE outputMediaType;
CopyMediaType(&outputMediaType, bestOutput->mediaType);
VIDEOINFOHEADER *vih = reinterpret_cast<VIDEOINFOHEADER*>(outputMediaType.pbFormat);
BITMAPINFOHEADER *bmi = GetVideoBMIHeader(&outputMediaType);
vih->AvgTimePerFrame = frameInterval;
bmi->biWidth = renderCX;
bmi->biHeight = renderCY;
bmi->biSizeImage = renderCX*renderCY*(bmi->biBitCount>>3);
if(FAILED(err = config->SetFormat(&outputMediaType)))
{
if(err != E_NOTIMPL)
{
AppWarning(TEXT("DShowPlugin: SetFormat on device pin failed, result = %08lX"), err);
goto cleanFinish;
}
}
FreeMediaType(outputMediaType);
//------------------------------------------------
// get audio pin configuration, optionally configure audio pin to 44100
GUID expectedAudioType;
if(soundOutputType == 1)
{
IAMStreamConfig *audioConfig;
if(SUCCEEDED(audioPin->QueryInterface(IID_IAMStreamConfig, (void**)&audioConfig)))
{
AM_MEDIA_TYPE *audioMediaType;
if(SUCCEEDED(err = audioConfig->GetFormat(&audioMediaType)))
{
SetAudioInfo(audioMediaType, expectedAudioType);
}
else if(err == E_NOTIMPL) //elgato probably
{
IEnumMediaTypes *audioMediaTypes;
if(SUCCEEDED(err = audioPin->EnumMediaTypes(&audioMediaTypes)))
{
ULONG i = 0;
if((err = audioMediaTypes->Next(1, &audioMediaType, &i)) == S_OK)
SetAudioInfo(audioMediaType, expectedAudioType);
else
{
AppWarning(TEXT("DShowPlugin: audioMediaTypes->Next failed, result = %08lX"), err);
soundOutputType = 0;
}
audioMediaTypes->Release();
}
else
{
AppWarning(TEXT("DShowPlugin: audioMediaTypes->Next failed, result = %08lX"), err);
soundOutputType = 0;
}
}
else
{
AppWarning(TEXT("DShowPlugin: Could not get audio format, result = %08lX"), err);
soundOutputType = 0;
}
audioConfig->Release();
Example 11: if
AM_MEDIA_TYPE * CCaptureDevice::SelectMediaType(void)
{
// Preferred sequence: UYVY, YUY2, RGB565, RGB555, RGB24, RGB32
VIDEO_STREAM_CONFIG_CAPS pSCC;
AM_MEDIA_TYPE * pmt = NULL;
HRESULT hr = S_OK;
int nCounts=0, nSize=0;
int preferredIndex = -1;
enum {
UYVY = 0, YUY2, RGB565, RGB555, RGB24, RGB32, Unknown
} currentPreferred, temp;
currentPreferred = Unknown;
IAMStreamConfig * pCfg = GetStreamConfig();
pCfg->GetNumberOfCapabilities(&nCounts, &nSize);
for (int i = 0; i < nCounts; i++)
{
if (pCfg->GetStreamCaps(i, &pmt, (BYTE *)&pSCC) == S_OK)
{
if (pmt->subtype == MEDIASUBTYPE_RGB32)
{
temp = RGB32;
}
else if (pmt->subtype == MEDIASUBTYPE_RGB24)
{
temp = RGB24;
}
else if (pmt->subtype == MEDIASUBTYPE_RGB565)
{
temp = RGB565;
}
else if (pmt->subtype == MEDIASUBTYPE_RGB555)
{
temp = RGB555;
}
else if (pmt->subtype == MEDIASUBTYPE_YUY2)
{
temp = YUY2;
}
else if (pmt->subtype == MEDIASUBTYPE_UYVY)
{
temp = UYVY;
}
else
{
temp = Unknown;
}
if (temp < currentPreferred)
{
currentPreferred = temp;
preferredIndex = i;
}
DeleteMediaType(pmt);
}
}
// Get the preferred media type
if (preferredIndex != -1)
{
hr = pCfg->GetStreamCaps(preferredIndex, &pmt, (BYTE *)&pSCC);
}
else
{
hr = pCfg->GetFormat(&pmt);
}
return pmt;
}
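A short usage sketch for Example 11 (hypothetical caller; dev is a CCaptureDevice). In both branches the returned media type is owned by the caller:
AM_MEDIA_TYPE *pmt = dev.SelectMediaType();
if (pmt != NULL)
{
    IAMStreamConfig *pCfg = dev.GetStreamConfig();
    if (pCfg != NULL)
        pCfg->SetFormat(pmt);  // apply the preferred format to the pin
    DeleteMediaType(pmt);      // free it when done
}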
Example 12: APlaying
BOOL CBoxView::APlaying()
{
if (m_pGraph == NULL)
{
return FALSE;
}
CComPtr<IBaseFilter> pAudioInputFilter;
HRESULT hr = S_OK;
//Find the audio input filter and add it to the graph
hr = FindInputFilters((void**)&pAudioInputFilter, CLSID_AudioInputDeviceCategory);
if (NULL == pAudioInputFilter)
{
TRACE(L"[SVC] CBoxView:: Could not create the Filter AudioInputFilter");
return FALSE;
}
CComPtr<IPin> pinIn;
CComPtr<IAMAudioInputMixer> pPinMixer;
GetUnconnectedPin(pAudioInputFilter, PINDIR_INPUT, &pinIn);
hr = pinIn->QueryInterface(IID_IAMAudioInputMixer, (void **)&pPinMixer);
if (SUCCEEDED(hr))
{
hr = pPinMixer->put_Enable(TRUE);
}
hr = m_pGraph->AddFilter(pAudioInputFilter, L"ACapture");
//Create the audio renderer filter and add it to the graph
CComPtr <IBaseFilter> pAudioRenderer = NULL;
hr = CoCreateInstance(CLSID_AudioRender, NULL, CLSCTX_INPROC_SERVER, IID_IBaseFilter, (void **)&pAudioRenderer);
hr = m_pGraph->AddFilter(pAudioRenderer, L"Audio Renderer");
if (pAudioRenderer == NULL)
{
TRACE(L"[SVC] CBoxView:: Could not create the Filter AudioRenderer");
return FALSE;
}
//Get the microphone's output pin
CComPtr<IPin> pAudioOutput;
CComPtr<IEnumPins> pEnum;
pAudioInputFilter->EnumPins(&pEnum);
hr = pEnum->Reset();
hr = pEnum->Next(1, &pAudioOutput, NULL);
//Configure the microphone's output pin
IAMStreamConfig *pCfg = NULL;
hr = pAudioOutput->QueryInterface(IID_IAMStreamConfig, (void **)&pCfg);
// Read the current media type/format
AM_MEDIA_TYPE *pmt = NULL;
hr = pCfg->GetFormat(&pmt);
WAVEFORMATEX *pWF = (WAVEFORMATEX *)pmt->pbFormat;
long lAvgBytesPerSec = pWF->nAvgBytesPerSec; // keep the value before freeing pmt
DeleteMediaType(pmt);
// Release interfaces
pCfg->Release();
//Set up buffering on the microphone's output pin
IAMBufferNegotiation *pNeg;
pAudioOutput->QueryInterface(IID_IAMBufferNegotiation, (void **)&pNeg);
ALLOCATOR_PROPERTIES prop = { 0 };
prop.cbBuffer = lAvgBytesPerSec * 50 / 1000; // 50 ms of audio
prop.cBuffers = -1;
prop.cbAlign = -1;
prop.cbPrefix = -1;
hr = pNeg->SuggestAllocatorProperties(&prop);
pNeg->Release();
#if 0
//Method 1: render and run via RenderStream
hr = m_pCGB->RenderStream(&PIN_CATEGORY_PREVIEW,
&MEDIATYPE_Audio,
pAudioInputFilter,
NULL,
NULL);
#else
//Method 2: connect the pins directly and run
hr = ConnectFilters(m_pGraph, pAudioOutput, pAudioRenderer);
#endif
return SUCCEEDED(hr); // every failure path above returns FALSE, so TRUE must mean success
}
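Examples 12 and 13 rely on a ConnectFilters helper that is not shown. It is commonly implemented along the lines of the DirectShow documentation, sketched here using the same GetUnconnectedPin helper Example 12 calls; a filter-to-filter overload (as Example 13 uses) typically wraps this one by first locating the source's free output pin:
HRESULT ConnectFilters(IGraphBuilder *pGraph, IPin *pOut, IBaseFilter *pDest)
{
    IPin *pIn = NULL;
    HRESULT hr = GetUnconnectedPin(pDest, PINDIR_INPUT, &pIn); // find a free input pin
    if (FAILED(hr))
        return hr;
    hr = pGraph->Connect(pOut, pIn); // intelligent connect inserts converters if needed
    pIn->Release();
    return hr;
}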
Example 13: init
// use cameraID 1 for first and so on
HRESULT VideoTexture::init(int cameraID)
{
if (cameraID <= 0) return S_FALSE;
glEnable(GL_TEXTURE_2D);
// Texture -> This will be put into the camera module
glGenTextures(1, textures); // Create The Texture
// Typical Texture Generation Using Data From The Bitmap
for (int i = 0; i < 1; i++)
{
//glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, textures[i]);
// Generate The Texture (640x480... make changeable!)
//glTexImage2D(GL_TEXTURE_2D, 0, 3, 640, 480, 0, GL_RGB, GL_UNSIGNED_BYTE, ...THe data111!!!);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); // Linear Filtering
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); // Linear Filtering
// Allocate an empty RGB texture to be filled with camera frames later
glTexImage2D(GL_TEXTURE_2D, 0, 3, TEXTURE_WIDTH, TEXTURE_HEIGHT, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
}
// Video stuff:
// Create captue graph builder:
HRESULT hr = InitCaptureGraphBuilder(&pGraph, &pBuild);
if (FAILED(hr)) return hr;
IEnumMoniker *enumerator;
hr = EnumerateDevices(CLSID_VideoInputDeviceCategory, &enumerator);
//DisplayDeviceInformation(enumerator);
// Advance to the requested camera (cameraID is 1-based):
IMoniker *pMoniker = NULL;
for (int i = 0; i < cameraID; i++)
{
enumerator->Next(1, &pMoniker, NULL);
}
IBaseFilter *pCap = NULL;
hr = pMoniker->BindToObject(0, 0, IID_IBaseFilter, (void**)&pCap);
if (SUCCEEDED(hr))
{
hr = pGraph->AddFilter(pCap, L"Capture Filter");
if (FAILED(hr)) return hr;
}
else return hr;
// Create the Sample Grabber which we will use
// To take each frame for texture generation
hr = CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER,
IID_ISampleGrabber, (void **)&pGrabber);
if (FAILED(hr)) return hr;
hr = pGrabber->QueryInterface(IID_IBaseFilter, (void **)&pGrabberBase);
// We have to set the 24-bit RGB desire here
// So that the proper conversion filters
// Are added automatically.
AM_MEDIA_TYPE desiredType;
memset(&desiredType, 0, sizeof(desiredType));
desiredType.majortype = MEDIATYPE_Video;
desiredType.subtype = MEDIASUBTYPE_RGB24;
desiredType.formattype = FORMAT_VideoInfo;
pGrabber->SetMediaType(&desiredType);
pGrabber->SetBufferSamples(TRUE);
// add to Graph
pGraph->AddFilter(pGrabberBase, L"Grabber");
/* Null render filter */
hr = CoCreateInstance(CLSID_NullRenderer, NULL, CLSCTX_INPROC_SERVER, IID_IBaseFilter, (void**)&pNullRender);
if(FAILED(hr)) return hr;
pGraph->AddFilter(pNullRender, L"Render");
// Connect the graph
hr = ConnectFilters(pGraph, pCap, pGrabberBase);
if(FAILED(hr)) return hr;
hr = ConnectFilters(pGraph, pGrabberBase, pNullRender);
// Set output format of capture:
IAMStreamConfig *pConfig = NULL;
hr = pBuild->FindInterface(
&PIN_CATEGORY_CAPTURE, // Capture pin.
0, // Any media type.
pCap, // Pointer to the capture filter.
IID_IAMStreamConfig, (void**)&pConfig);
if (FAILED(hr)) return hr;
AM_MEDIA_TYPE *pmtConfig;
hr = pConfig->GetFormat(&pmtConfig);
if (FAILED(hr)) return hr;
// Try and find a good video format
int iCount = 0, iSize = 0;
hr = pConfig->GetNumberOfCapabilities(&iCount, &iSize);
// Check the size to make sure we pass in the correct structure.
if (iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS))
{
// Use the video capabilities structure.
for (int iFormat = 0; iFormat < iCount; iFormat++)
{
VIDEO_STREAM_CONFIG_CAPS scc;
AM_MEDIA_TYPE *pmtConfig;
hr = pConfig->GetStreamCaps(iFormat, &pmtConfig, (BYTE*)&scc);
if (SUCCEEDED(hr))
{
VIDEOINFOHEADER *hdr = (VIDEOINFOHEADER *)pmtConfig->pbFormat;
//......... part of the code omitted here .........
Example 14: CaptureVideo
HRESULT CaptureVideo()
{
HRESULT hr;
IBaseFilter *pSrcFilter=NULL;
// Get DirectShow interfaces
hr = GetInterfaces();
if (FAILED(hr))
{
Msg(TEXT("Failed to get video interfaces! hr=0x%x"), hr);
return hr;
}
// Attach the filter graph to the capture graph
hr = g_pCapture->SetFiltergraph(g_pGraph);
if (FAILED(hr))
{
Msg(TEXT("Failed to set capture filter graph! hr=0x%x"), hr);
return hr;
}
// Use the system device enumerator and class enumerator to find
// a video capture/preview device, such as a desktop USB video camera.
hr = FindCaptureDevice(&pSrcFilter);
if (FAILED(hr))
{
// Don't display a message because FindCaptureDevice will handle it
return hr;
}
// Add Capture filter to our graph.
hr = g_pGraph->AddFilter(pSrcFilter, L"Video Capture");
if (FAILED(hr))
{
Msg(TEXT("Couldn't add the capture filter to the graph! hr=0x%x\r\n\r\n")
TEXT("If you have a working video capture device, please make sure\r\n")
TEXT("that it is connected and is not being used by another application.\r\n\r\n")
TEXT("The sample will now close."), hr);
pSrcFilter->Release();
return hr;
}
// Copied code
//========================================
IAMStreamConfig *pSC;
hr = g_pCapture->FindInterface(&PIN_CATEGORY_PREVIEW,
&MEDIATYPE_Interleaved,
pSrcFilter, IID_IAMStreamConfig, (void **)&pSC);
if(FAILED(hr))
hr = g_pCapture->FindInterface(&PIN_CATEGORY_PREVIEW,
&MEDIATYPE_Video, pSrcFilter,
IID_IAMStreamConfig, (void **)&pSC);
if (!pSC) {
return hr;
}
int iCount = 0, iSize = 0;
hr = pSC->GetNumberOfCapabilities(&iCount, &iSize);
// Check the size to make sure we pass in the correct structure.
if (iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS))
{
// Use the video capabilities structure.
int i = 0;
for (int iFormat = 0; iFormat < iCount; iFormat++)
{
VIDEO_STREAM_CONFIG_CAPS scc;
AM_MEDIA_TYPE *pmtConfig;
hr = pSC->GetFormat(&pmtConfig);
VIDEOINFOHEADER *pvi = (VIDEOINFOHEADER *)pmtConfig->pbFormat;
double fps = 30;
pvi->AvgTimePerFrame = (LONGLONG)(10000000/fps);
pvi->bmiHeader.biWidth = 1920;
pvi->bmiHeader.biHeight = 1080;
pvi->bmiHeader.biSizeImage = DIBSIZE(pvi->bmiHeader); // recompute after changing the dimensions
hr = pSC->SetFormat(pmtConfig);
//hr = pSC->GetStreamCaps(iFormat, &pmtConfig, (BYTE*)&scc);
//if (SUCCEEDED(hr))
//{
// /* Examine the format, and possibly use it. */
// if (pmtConfig->formattype == FORMAT_VideoInfo) {
// long width = HEADER(pmtConfig->pbFormat)->biWidth;
// long height = HEADER(pmtConfig->pbFormat)->biHeight;
//......... part of the code omitted here .........
Example 15: if
//......... part of the code omitted here .........
cam_cfg.device = cam_id;
sprintf(cam_cfg.name, "%s", nDeviceName);
int iCount = 0;
int iSize = 0;
hr = lpStreamConfig->GetNumberOfCapabilities(&iCount, &iSize);
std::vector<CameraConfig> fmt_list;
if (iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS))
{
GUID lastFormat = MEDIASUBTYPE_None;
for (int iFormat = 0; iFormat < iCount; iFormat+=2)
{
VIDEO_STREAM_CONFIG_CAPS scc;
AM_MEDIA_TYPE *pmtConfig;
hr = lpStreamConfig->GetStreamCaps(iFormat, &pmtConfig, (BYTE*)&scc);
if (SUCCEEDED(hr)){
if ( pmtConfig->subtype != lastFormat) {
if (fmt_list.size()>0) {
std::sort(fmt_list.begin(), fmt_list.end());
cfg_list.insert( cfg_list.end(), fmt_list.begin(), fmt_list.end() );
fmt_list.clear();
}
cam_cfg.cam_format = getMediaSubtype(pmtConfig->subtype);
lastFormat = pmtConfig->subtype;
}
int stepX = scc.OutputGranularityX;
int stepY = scc.OutputGranularityY;
if(stepX < 1 || stepY < 1) continue;
else if ((stepX==1) && (stepY==1)) {
cam_cfg.cam_width = scc.InputSize.cx;
cam_cfg.cam_height = scc.InputSize.cy;
int maxFrameInterval = scc.MaxFrameInterval;
if (maxFrameInterval==0) maxFrameInterval = 10000000;
float last_fps=-1;
VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)pmtConfig->pbFormat;
for (int iv=scc.MinFrameInterval; iv<=maxFrameInterval; iv=iv*2) {
pVih->AvgTimePerFrame = iv;
hr = lpStreamConfig->SetFormat(pmtConfig);
if (hr==S_OK) {
hr = lpStreamConfig->GetFormat(&pmtConfig);
float fps = ((int)floor(100000000.0f/(float)pVih->AvgTimePerFrame + 0.5f))/10.0f;
if (fps!=last_fps) {
cam_cfg.cam_fps = fps;
fmt_list.push_back(cam_cfg);
last_fps = fps;
}
}
}
} else {
int x,y;
for (x=scc.MinOutputSize.cx, y=scc.MinOutputSize.cy; x<=scc.MaxOutputSize.cx && y<=scc.MaxOutputSize.cy; x+=stepX, y+=stepY) {
cam_cfg.cam_width = x;
cam_cfg.cam_height = y;
int maxFrameInterval = scc.MaxFrameInterval;
if (maxFrameInterval==0) maxFrameInterval = 10000000;
float last_fps=-1;
VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)pmtConfig->pbFormat;
for (int iv=scc.MinFrameInterval; iv<=maxFrameInterval; iv=iv*2) {
pVih->AvgTimePerFrame = iv;
hr = lpStreamConfig->SetFormat(pmtConfig);
if (hr==S_OK) {
hr = lpStreamConfig->GetFormat(&pmtConfig);
float fps = ((int)floor(100000000.0f/(float)pVih->AvgTimePerFrame + 0.5f))/10.0f;
if (fps!=last_fps) {
cam_cfg.cam_fps = fps;
fmt_list.push_back(cam_cfg);
last_fps = fps;
}
}
}
}
}
deleteMediaType(pmtConfig);
}
}
}
if (fmt_list.size()>0) {
std::sort(fmt_list.begin(), fmt_list.end());
cfg_list.insert( cfg_list.end(), fmt_list.begin(), fmt_list.end() );
fmt_list.clear();
}
lpStreamConfig->Release();
lpInputFilter->Release();
lpGraphBuilder->Release();
lpCaptureGraphBuilder->Release();
}
comUnInit();
return cfg_list;
}
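Finally, a quick worked example of the fps arithmetic in Example 15. AvgTimePerFrame is a REFERENCE_TIME in 100 ns units, so fps = 10,000,000 / interval; Example 15 scales by 10 before rounding to keep one decimal place:
// Worked example of the rounding used above (values hypothetical).
REFERENCE_TIME interval = 333333;  // ~33.3 ms per frame
float fps = ((int)floor(100000000.0f / (float)interval + 0.5f)) / 10.0f;
// 100000000 / 333333 = 300.0003 -> floor(300.5003) = 300 -> 300 / 10 = 30.0 fps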