This article collects typical usage examples of the C++ method IAMStreamConfig::GetNumberOfCapabilities. If you have been wondering how IAMStreamConfig::GetNumberOfCapabilities is used in practice, the hand-picked examples below should help. You can also explore further usage examples of the containing class, IAMStreamConfig.
Below, 15 code examples of IAMStreamConfig::GetNumberOfCapabilities are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
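Before diving in, it helps to fix the call pattern in mind: GetNumberOfCapabilities is a two-step query. It reports how many capability records the pin exposes and how large each record is; GetStreamCaps then retrieves one record (an AM_MEDIA_TYPE plus a capabilities structure) at a time. Here is a minimal sketch of that pattern, assuming an already-acquired IAMStreamConfig* (the helper name and the "largest output size" policy are illustrative, not taken from any of the examples):
#include <dshow.h>   // link with strmiids.lib; COM must be initialized

// Illustrative helper: walk every capability record of a video pin and
// report the largest advertised output size (by width).
HRESULT LargestOutputSize(IAMStreamConfig *pConfig, SIZE *pBest)
{
    int count = 0, size = 0;
    HRESULT hr = pConfig->GetNumberOfCapabilities(&count, &size);
    if (FAILED(hr))
        return hr;
    // The second out-parameter says which caps structure GetStreamCaps fills.
    if (size != sizeof(VIDEO_STREAM_CONFIG_CAPS))
        return E_FAIL;   // audio pin, or an unknown caps layout
    pBest->cx = pBest->cy = 0;
    for (int i = 0; i < count; ++i)
    {
        VIDEO_STREAM_CONFIG_CAPS caps;
        AM_MEDIA_TYPE *pmt = NULL;
        if (pConfig->GetStreamCaps(i, &pmt, (BYTE*)&caps) == S_OK)
        {
            if (caps.MaxOutputSize.cx > pBest->cx)
                *pBest = caps.MaxOutputSize;
            // Free the media type: format block, optional pUnk, then the struct.
            if (pmt->pbFormat) CoTaskMemFree(pmt->pbFormat);
            if (pmt->pUnk) pmt->pUnk->Release();
            CoTaskMemFree(pmt);
        }
    }
    return S_OK;
}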
Example 1: EnumResolutions
void VideoCapture::EnumResolutions()
{
int iCount, iSize, iChosen=-1;
IBaseFilter *pSource;
CComPtr <ICaptureGraphBuilder2> pCaptB;
VIDEO_STREAM_CONFIG_CAPS caps;
HRESULT hr;
bool response;
IAMStreamConfig *pConfig;
devices_resolutions = new DeviceResolutions[nDevices];
pCaptB.CoCreateInstance(CLSID_CaptureGraphBuilder2);
for (unsigned int iDevice=0; iDevice<nDevices; iDevice++)
{
response = BindFilter(iDevice, &pSource);
hr = pCaptB->FindInterface(
&PIN_CATEGORY_CAPTURE,
&MEDIATYPE_Video,
pSource,
IID_IAMStreamConfig,
(void**)&pConfig);
if (!SUCCEEDED(hr))
{
pSource->Release();
devices_resolutions[iDevice].nResolutions = 0;
continue;
}
hr = pConfig->GetNumberOfCapabilities(&iCount, &iSize);
if (FAILED(hr))
{
    pSource->Release();
    pConfig->Release();
    devices_resolutions[iDevice].nResolutions = 0;
    continue;
}
devices_resolutions[iDevice].SetNResolutions(iCount);
for(int i=0; i < iCount; i++) {
AM_MEDIA_TYPE *pmt;
if( pConfig->GetStreamCaps(i, &pmt, reinterpret_cast<BYTE*>(&caps)) == S_OK ) {
VIDEOINFOHEADER *pVih =
reinterpret_cast<VIDEOINFOHEADER*>(pmt->pbFormat);
devices_resolutions[iDevice].x[i] = caps.InputSize.cx;
devices_resolutions[iDevice].y[i] = caps.InputSize.cy;
devices_resolutions[iDevice].color_space[i] = pmt->subtype;
devices_resolutions[iDevice].compression[i] = pVih->bmiHeader.biCompression;
DeleteMediaType(pmt);
}
}
pSource->Release();
pConfig->Release();
pSource = 0;
}
}
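Example 1 mixes a CComPtr for the graph builder with raw interface pointers that must be released by hand. Below is a hedged sketch of the same interface query using CComPtr throughout (ATL, atlbase.h); the function name is made up for illustration:
#include <atlbase.h>   // CComPtr
#include <dshow.h>

// Same FindInterface query as Example 1, but the smart pointer releases
// IAMStreamConfig automatically on every exit path.
HRESULT QueryCapsCount(ICaptureGraphBuilder2 *pBuild, IBaseFilter *pSource, int *pCount)
{
    CComPtr<IAMStreamConfig> pConfig;
    HRESULT hr = pBuild->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video,
                                       pSource, IID_IAMStreamConfig, (void**)&pConfig);
    if (FAILED(hr))
        return hr;
    int size = 0;
    return pConfig->GetNumberOfCapabilities(pCount, &size);
    // no Release() needed: ~CComPtr handles it
}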
Example 2: listGUIDS
bool MIPDirectShowCapture::listGUIDS(std::list<GUID> &guids)
{
guids.clear();
HRESULT hr;
IAMStreamConfig *pConfig = 0;
hr = m_pBuilder->FindInterface(&PIN_CATEGORY_CAPTURE, 0, m_pCaptDevice, IID_IAMStreamConfig, (void**)&pConfig);
if (HR_FAILED(hr))
{
setErrorString(MIPDIRECTSHOWCAPTURE_ERRSTR_CANTGETDEVICECONFIG);
return false;
}
int count = 0;
int s = 0;
hr = pConfig->GetNumberOfCapabilities(&count, &s);
if (HR_FAILED(hr))
{
pConfig->Release();
setErrorString(MIPDIRECTSHOWCAPTURE_ERRSTR_CANTGETDEVICECAPS);
return false;
}
if (s != sizeof(VIDEO_STREAM_CONFIG_CAPS))
{
pConfig->Release();
setErrorString(MIPDIRECTSHOWCAPTURE_ERRSTR_INVALIDCAPS);
return false;
}
for (int i = 0; i < count; i++)
{
VIDEO_STREAM_CONFIG_CAPS caps;
AM_MEDIA_TYPE *pMediaType;
hr = pConfig->GetStreamCaps(i, &pMediaType, (BYTE*)&caps);
if (HR_SUCCEEDED(hr))
{
if (pMediaType->majortype == MEDIATYPE_Video)
{
GUID subType = pMediaType->subtype;
guids.push_back(subType);
// uint8_t *pSubType = (uint8_t *)&subType;
//
// printf("0x%02x%02x%02x%02x %c%c%c%c\n",(int)pSubType[0],(int)pSubType[1],(int)pSubType[2],(int)pSubType[3],
// (char)pSubType[0],(char)pSubType[1],(char)pSubType[2],(char)pSubType[3]);
}
}
}
return true;
}
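The commented-out lines above hint that a video subtype GUID often carries a FOURCC: for FOURCC-style subtypes (YUY2, UYVY, MJPG, ...), the four characters sit in GUID::Data1. A small sketch of the decoding; note it is meaningless for the RGB subtypes, which use fixed GUIDs instead:
#include <windows.h>
#include <string>

// Decode the FOURCC packed into Data1 of a FOURCC-style media subtype.
// Data1 is little-endian, so its four bytes are the characters in order.
std::string SubtypeFourCC(const GUID &subtype)
{
    const char *p = reinterpret_cast<const char*>(&subtype.Data1);
    return std::string(p, p + 4);   // e.g. "YUY2"
}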
Example 3: initSupportedFormats
void DSCaptureDevice::initSupportedFormats()
{
HRESULT ret;
IAMStreamConfig* streamConfig = NULL;
AM_MEDIA_TYPE* mediaType = NULL;
ret = m_captureGraphBuilder->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video,
m_srcFilter, IID_IAMStreamConfig, (void**)&streamConfig);
/* query and record every format the capture pin supports */
if(!FAILED(ret))
{
int nb = 0;
int size = 0;
BYTE* allocBytes = NULL;
streamConfig->GetNumberOfCapabilities(&nb, &size);
allocBytes = new BYTE[size];
for(int i = 0 ; i < nb ; i++)
{
if(streamConfig->GetStreamCaps(i, &mediaType, allocBytes) == S_OK)
{
struct DSFormat format;
VIDEOINFOHEADER* hdr = (VIDEOINFOHEADER*)mediaType->pbFormat;
if(hdr)
{
format.height = hdr->bmiHeader.biHeight;
format.width = hdr->bmiHeader.biWidth;
format.pixelFormat = mediaType->subtype.Data1;
format.mediaType = mediaType->subtype;
m_formats.push_back(format);
}
}
}
delete[] allocBytes; // array new requires array delete
}
}
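Example 3 never frees the AM_MEDIA_TYPE structures GetStreamCaps hands back, while Examples 1 and 10 rely on DeleteMediaType to do so. That helper lives in the DirectShow base-class library (mtype.cpp), not in dshow.h; its documented implementation is small enough to reproduce for builds that do not link the base classes:
#include <dshow.h>

// Documented DirectShow helpers, normally supplied by the base-class library.
void FreeMediaType(AM_MEDIA_TYPE &mt)
{
    if (mt.cbFormat != 0)
    {
        CoTaskMemFree(mt.pbFormat);
        mt.cbFormat = 0;
        mt.pbFormat = NULL;
    }
    if (mt.pUnk != NULL)
    {
        mt.pUnk->Release();   // rarely set, but part of the contract
        mt.pUnk = NULL;
    }
}

void DeleteMediaType(AM_MEDIA_TYPE *pmt)
{
    if (pmt != NULL)
    {
        FreeMediaType(*pmt);
        CoTaskMemFree(pmt);   // the struct itself is CoTaskMemAlloc'd
    }
}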
Example 4: getDevFilter
QVector<VideoMode> DirectShow::getDeviceModes(QString devName)
{
QVector<VideoMode> modes;
IBaseFilter* devFilter = getDevFilter(devName);
if (!devFilter)
return modes;
// The outer loop tries to find a valid output pin
GUID category;
DWORD r2;
IEnumPins *pins = nullptr;
IPin *pin;
if (devFilter->EnumPins(&pins) != S_OK)
return modes;
while (pins->Next(1, &pin, nullptr) == S_OK)
{
IKsPropertySet *p = nullptr;
PIN_INFO info;
pin->QueryPinInfo(&info);
info.pFilter->Release();
if (info.dir != PINDIR_OUTPUT)
goto next;
if (pin->QueryInterface(IID_IKsPropertySet, (void**)&p) != S_OK)
goto next;
if (p->Get(AMPROPSETID_Pin, AMPROPERTY_PIN_CATEGORY,
nullptr, 0, &category, sizeof(GUID), &r2) != S_OK)
goto next;
if (!IsEqualGUID(category, PIN_CATEGORY_CAPTURE))
goto next;
// Now we can list the video modes for the current pin
// Prepare for another wall of spaghetti DIRECT SHOW QUALITY code
{
IAMStreamConfig *config = nullptr;
VIDEO_STREAM_CONFIG_CAPS *vcaps = nullptr;
int size, n;
if (pin->QueryInterface(IID_IAMStreamConfig, (void**)&config) != S_OK)
goto next;
if (config->GetNumberOfCapabilities(&n, &size) != S_OK)
goto pinend;
assert(size == sizeof(VIDEO_STREAM_CONFIG_CAPS));
vcaps = new VIDEO_STREAM_CONFIG_CAPS;
for (int i=0; i<n; ++i)
{
AM_MEDIA_TYPE* type = nullptr;
if (config->GetStreamCaps(i, &type, (BYTE*)vcaps) != S_OK)
goto nextformat;
if (!IsEqualGUID(type->formattype, FORMAT_VideoInfo)
&& !IsEqualGUID(type->formattype, FORMAT_VideoInfo2))
goto nextformat;
VideoMode mode;
mode.width = vcaps->MaxOutputSize.cx;
mode.height = vcaps->MaxOutputSize.cy;
mode.FPS = 1e7 / vcaps->MinFrameInterval;
if (!modes.contains(mode))
modes.append(std::move(mode));
nextformat:
        if (type)   // guard: GetStreamCaps may have failed, leaving type null
        {
            if (type->pbFormat)
                CoTaskMemFree(type->pbFormat);
            CoTaskMemFree(type);
        }
}
pinend:
config->Release();
delete vcaps;
}
next:
if (p)
p->Release();
pin->Release();
}
return modes;
}
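Example 4 converts MinFrameInterval to a frame rate: frame intervals are REFERENCE_TIME values in 100 ns units, so the shortest interval is the highest FPS and the longest interval the lowest. A hedged sketch of both conversions (helper names are illustrative):
#include <dshow.h>

const LONGLONG kRefUnitsPerSecond = 10000000;   // 100 ns units per second

// Shortest interval between frames -> fastest supported rate.
double MaxFps(const VIDEO_STREAM_CONFIG_CAPS &caps)
{
    return caps.MinFrameInterval
        ? (double)kRefUnitsPerSecond / (double)caps.MinFrameInterval : 0.0;
}

// Longest interval between frames -> slowest supported rate.
double MinFps(const VIDEO_STREAM_CONFIG_CAPS &caps)
{
    return caps.MaxFrameInterval
        ? (double)kRefUnitsPerSecond / (double)caps.MaxFrameInterval : 0.0;
}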
Example 5: if
//......... (part of the code omitted here) .........
{
return -14;
}
hr = s->m_pGraph->AddFilter(s->m_pIDXFilter, L"DXFilter Filter");
if(FAILED(hr))
{
return -15;
}
// get null renderer
hr=CoCreateInstance (CLSID_NullRenderer,
NULL,
CLSCTX_INPROC_SERVER,
IID_IBaseFilter,
(void **)&s->m_pNullRenderer);
if(FAILED(hr))
{
return -16;
}
if (s->m_pNullRenderer!=NULL)
{
s->m_pGraph->AddFilter(s->m_pNullRenderer, L"Null Renderer");
}
hr = s->m_pBuilder->RenderStream(&pPinCategory,
&MEDIATYPE_Video, s->m_pDeviceFilter, s->m_pIDXFilter, s->m_pNullRenderer);
if (FAILED(hr))
{
return -17;
}
IAMStreamConfig *pConfig = NULL;
hr = s->m_pBuilder->FindInterface(
&pPinCategory, // Preview pin.
&MEDIATYPE_Video, // Any media type.
s->m_pDeviceFilter, // Pointer to the capture filter.
IID_IAMStreamConfig, (void**)&pConfig);
if (pConfig!=NULL)
{
AM_MEDIA_TYPE *pType = NULL;
int iCount, iSize;
pConfig->GetNumberOfCapabilities(&iCount, &iSize);
for (int i = 0; i < iCount; i++) {
VIDEO_STREAM_CONFIG_CAPS scc;
pType = NULL;
if (pConfig->GetStreamCaps(i, &pType, (BYTE *)&scc) != S_OK)
    continue;   // pType is only valid when GetStreamCaps returns S_OK
if (!((pType->formattype == FORMAT_VideoInfo) &&
(pType->cbFormat >= sizeof(VIDEOINFOHEADER)) &&
(pType->pbFormat != NULL)))
continue;
VIDEOINFOHEADER & videoInfo = *(VIDEOINFOHEADER *)pType->pbFormat;
if (m != pType->subtype)
continue;
if (videoInfo.bmiHeader.biWidth != s->vsize.width)
continue;
if (videoInfo.bmiHeader.biHeight != s->vsize.height)
continue;
if (videoInfo.bmiHeader.biBitCount != pvi->bmiHeader.biBitCount)
continue;
if (videoInfo.bmiHeader.biCompression != pvi->bmiHeader.biCompression)
continue;
videoInfo.AvgTimePerFrame = UNITS / (LONGLONG)s->fps;
pConfig->SetFormat(pType);
}
pConfig->GetFormat(&pType);
if (pType!=NULL)
{
VIDEOINFO *pvi;
pvi = (VIDEOINFO *)pType->pbFormat;
ms_message("v4w: camera asked fps=%.2f // real fps=%.2f", s->fps, ((float)UNITS / (float)pvi->AvgTimePerFrame));
}
pConfig->Release();
}
//m_pDXFilter->SetBufferSamples(TRUE);
s_callback = s;
hr = s->m_pControl->Run();
if(FAILED(hr))
{
return -18;
}
s->rotregvalue=1;
return 0;
}
Example 6: IAMStreamConfig
static GstCaps *
gst_dshowvideosrc_getcaps_from_streamcaps (GstDshowVideoSrc * src, IPin * pin)
{
GstCaps *caps = NULL;
HRESULT hres = S_OK;
int icount = 0;
int isize = 0;
VIDEO_STREAM_CONFIG_CAPS vscc;
int i = 0;
IAMStreamConfig *streamcaps = NULL;
hres = pin->QueryInterface (IID_IAMStreamConfig, (LPVOID *) & streamcaps);
if (FAILED (hres)) {
GST_ERROR ("Failed to retrieve IAMStreamConfig (error=0x%x)", hres);
return NULL;
}
streamcaps->GetNumberOfCapabilities (&icount, &isize);
if (isize != sizeof (vscc)) {
streamcaps->Release ();
return NULL;
}
caps = gst_caps_new_empty ();
for (i = 0; i < icount; i++) {
GstCapturePinMediaType *pin_mediatype =
gst_dshow_new_pin_mediatype_from_streamcaps (pin, i, streamcaps);
if (pin_mediatype) {
GstCaps *mediacaps = NULL;
GstVideoFormat video_format =
gst_dshow_guid_to_gst_video_format (pin_mediatype->mediatype);
if (video_format != GST_VIDEO_FORMAT_UNKNOWN) {
mediacaps = gst_dshow_new_video_caps (video_format, NULL,
pin_mediatype);
} else if (gst_dshow_check_mediatype (pin_mediatype->mediatype,
MEDIASUBTYPE_dvsd, FORMAT_VideoInfo)) {
mediacaps =
gst_dshow_new_video_caps (GST_VIDEO_FORMAT_UNKNOWN,
"video/x-dv, systemstream=FALSE", pin_mediatype);
} else if (gst_dshow_check_mediatype (pin_mediatype->mediatype,
MEDIASUBTYPE_dvsd, FORMAT_DvInfo)) {
mediacaps =
gst_dshow_new_video_caps (GST_VIDEO_FORMAT_UNKNOWN,
"video/x-dv, systemstream=TRUE", pin_mediatype);
pin_mediatype->granularityWidth = 0;
pin_mediatype->granularityHeight = 0;
} else if(gst_dshow_check_mediatype(pin_mediatype->mediatype,
MEDIASUBTYPE_MJPG, FORMAT_VideoInfo)) {
mediacaps = gst_dshow_new_video_caps(GST_VIDEO_FORMAT_UNKNOWN,
"image/jpeg", pin_mediatype);
}
if (mediacaps) {
src->pins_mediatypes =
g_list_append (src->pins_mediatypes, pin_mediatype);
gst_caps_append (caps, mediacaps);
} else {
/* failed to convert dshow caps */
gst_dshow_free_pin_mediatype (pin_mediatype);
}
}
}
streamcaps->Release ();
if (caps && gst_caps_is_empty (caps)) {
gst_caps_unref (caps);
caps = NULL;
}
return caps;
}
Example 7: setCaptureOutputFormat
void DirectShowGrabber::setCaptureOutputFormat() {
IAMStreamConfig *pConfig;
int iCount;
int iSize;
VIDEOINFOHEADER *pVih;
VIDEO_STREAM_CONFIG_CAPS scc;
AM_MEDIA_TYPE *pmtConfig;
int formatSet;
HRESULT hr;
// Reference http://msdn.microsoft.com/library/default.asp?url=/library/en-us/directshow/htm/configurethevideooutputformat.asp
debug_msg("DirectShowGrabber::setCaptureOutputFormat(): enter...\n");
formatSet = 0;
pConfig = NULL;
hr = pBuild_->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video,
pCaptureFilter_, IID_IAMStreamConfig, (void**)&pConfig);
if (FAILED(hr)) {
Grabber::status_=-1;
return;
}
debug_msg("DirectShowGrabber::setCaptureOutputFormat(): IAMStreamConfig interface acquired\n");
iCount = iSize = 0;
hr = pConfig->GetNumberOfCapabilities(&iCount, &iSize);
// Check the size to make sure we pass in the correct structure.
// The alternative output of iSize is AUDIO_STREAM_CONFIG_CAPS, btw.
if ( iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS) ) {
for (int iFormat = 0; iFormat < iCount; iFormat++) {
hr = pConfig->GetStreamCaps(iFormat, &pmtConfig, (BYTE *)&scc);
//showErrorMessage(hr);
if( SUCCEEDED(hr) ) {
if ((pmtConfig->majortype == MEDIATYPE_Video) &&
(pmtConfig->subtype == MEDIASUBTYPE_RGB24) &&
(pmtConfig->formattype == FORMAT_VideoInfo) &&
(pmtConfig->cbFormat >= sizeof (VIDEOINFOHEADER)) &&
(pmtConfig->pbFormat != NULL)) {
pVih = (VIDEOINFOHEADER *)pmtConfig->pbFormat;
pVih->bmiHeader.biWidth = 320;
pVih->bmiHeader.biHeight = 240;
pVih->bmiHeader.biSizeImage = DIBSIZE(pVih->bmiHeader);
debug_msg("Windows GDI BITMAPINFOHEADER follows:\n");
debug_msg("biWidth= %d\n", pVih->bmiHeader.biWidth);
debug_msg("biHeight= %d\n", pVih->bmiHeader.biHeight);
debug_msg("biSize= %d\n", pVih->bmiHeader.biSize);
debug_msg("biPlanes= %d\n", pVih->bmiHeader.biPlanes);
debug_msg("biBitCount= %d\n", pVih->bmiHeader.biBitCount);
debug_msg("biCompression= %d\n", pVih->bmiHeader.biCompression);
debug_msg("biSizeImage= %d\n", pVih->bmiHeader.biSizeImage);
debug_msg("biXPelsPerMeter=%d\n", pVih->bmiHeader.biXPelsPerMeter);
debug_msg("biYPelsPerMeter=%d\n", pVih->bmiHeader.biYPelsPerMeter);
debug_msg("biClrUsed= %d\n", pVih->bmiHeader.biClrUsed);
debug_msg("biClrImportant= %d\n", pVih->bmiHeader.biClrImportant);
hr = pConfig->SetFormat(pmtConfig);
//showErrorMessage(hr);
// XXX: leak. need to deal with this - msp
//DeleteMediaType(pmtConfig);
formatSet = 1;
break;
}
}
}
}
pConfig->Release();
if( formatSet )
debug_msg("DirectShowGrabber::setCaptureOutputFormat: format set\n");
else
debug_msg("DirectShowGrabber::setCaptureOutputFormat: format not set\n");
}
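Example 7 patches biWidth/biHeight and then recomputes biSizeImage with DIBSIZE; keeping those derived fields consistent matters, because a stale biSizeImage can make SetFormat fail or let downstream filters misjudge buffer sizes. A small sketch of that bookkeeping in one place (the helper name is illustrative):
#include <dshow.h>
#include <amvideo.h>   // DIBSIZE

// Resize a VIDEOINFOHEADER and keep its derived size field consistent.
void ResizeVideoInfo(VIDEOINFOHEADER *pVih, LONG width, LONG height)
{
    pVih->bmiHeader.biWidth  = width;
    pVih->bmiHeader.biHeight = height;
    // DIBSIZE accounts for the DWORD-aligned stride and |biHeight|.
    pVih->bmiHeader.biSizeImage = DIBSIZE(pVih->bmiHeader);
}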
Example 8: setFormat
bool MIPDirectShowCapture::setFormat(int w, int h, real_t rate)
{
HRESULT hr;
IAMStreamConfig *pConfig = 0;
hr = m_pBuilder->FindInterface(&PIN_CATEGORY_CAPTURE, 0, m_pCaptDevice, IID_IAMStreamConfig, (void**)&pConfig);
if (HR_FAILED(hr))
{
setErrorString(MIPDIRECTSHOWCAPTURE_ERRSTR_CANTGETDEVICECONFIG);
return false;
}
int count = 0;
int s = 0;
hr = pConfig->GetNumberOfCapabilities(&count, &s);
if (HR_FAILED(hr))
{
pConfig->Release();
setErrorString(MIPDIRECTSHOWCAPTURE_ERRSTR_CANTGETDEVICECAPS);
return false;
}
if (s != sizeof(VIDEO_STREAM_CONFIG_CAPS))
{
pConfig->Release();
setErrorString(MIPDIRECTSHOWCAPTURE_ERRSTR_INVALIDCAPS);
return false;
}
for (int i = 0; i < count; i++)
{
VIDEO_STREAM_CONFIG_CAPS caps;
AM_MEDIA_TYPE *pMediaType;
hr = pConfig->GetStreamCaps(i, &pMediaType, (BYTE*)&caps);
if (HR_SUCCEEDED(hr))
{
if ((pMediaType->majortype == MEDIATYPE_Video) &&
(pMediaType->subtype == m_selectedGuid) &&
(pMediaType->formattype == FORMAT_VideoInfo) &&
(pMediaType->cbFormat >= sizeof (VIDEOINFOHEADER)) &&
(pMediaType->pbFormat != 0))
{
VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)pMediaType->pbFormat;
pVih->bmiHeader.biWidth = w;
pVih->bmiHeader.biHeight = h;
pVih->bmiHeader.biSizeImage = DIBSIZE(pVih->bmiHeader);
pVih->AvgTimePerFrame = (REFERENCE_TIME)(10000000.0/rate);
hr = pConfig->SetFormat(pMediaType);
if (HR_SUCCEEDED(hr))
{
CoTaskMemFree(pMediaType->pbFormat);
pConfig->Release();
return true;
}
}
if (pMediaType->pbFormat != 0)
CoTaskMemFree(pMediaType->pbFormat);
}
}
pConfig->Release();
setErrorString(MIPDIRECTSHOWCAPTURE_ERRSTR_CANTSETCAPS);
return false;
}
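Drivers are free to round the requested frame rate, so after a SetFormat like the one above it can be worth reading the format back, as Example 5 also does. A hedged sketch (the tolerance and helper name are assumptions):
#include <dshow.h>
#include <cmath>

// Read back the negotiated format and compare its frame rate with the request.
bool VerifyFrameRate(IAMStreamConfig *pConfig, double requestedFps)
{
    AM_MEDIA_TYPE *pmt = NULL;
    if (FAILED(pConfig->GetFormat(&pmt)) || pmt == NULL)
        return false;
    bool ok = false;
    if (pmt->formattype == FORMAT_VideoInfo && pmt->pbFormat)
    {
        VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)pmt->pbFormat;
        double actual = 10000000.0 / (double)pVih->AvgTimePerFrame;
        ok = std::fabs(actual - requestedFps) < 0.5;   // tolerate driver rounding
    }
    if (pmt->pbFormat) CoTaskMemFree(pmt->pbFormat);
    CoTaskMemFree(pmt);
    return ok;
}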
Example 9: setFormat
HRESULT DSCaptureDevice::setFormat(const DSFormat& format)
{
HRESULT hr;
IAMStreamConfig* streamConfig = NULL;
/* get the right interface to change capture settings */
hr
= m_captureGraphBuilder->FindInterface(
&PIN_CATEGORY_CAPTURE,
&MEDIATYPE_Video,
m_srcFilter,
IID_IAMStreamConfig,
(void**) &streamConfig);
if(SUCCEEDED(hr))
{
int nb = 0;
int size = 0;
AM_MEDIA_TYPE* mediaType = NULL;
size_t bitCount = 0;
hr = streamConfig->GetNumberOfCapabilities(&nb, &size);
if (SUCCEEDED(hr) && nb)
{
BYTE* scc = new BYTE[size];
if (scc)
{
DWORD pixfmt = format.pixelFormat;
for (int i = 0 ; i < nb ; i++)
{
AM_MEDIA_TYPE* mt;
if (streamConfig->GetStreamCaps(i, &mt, scc) == S_OK)
{
VIDEOINFOHEADER* hdr = (VIDEOINFOHEADER*) mt->pbFormat;
if (hdr
&& (mt->subtype.Data1 == pixfmt)
&& ((long) format.height
== hdr->bmiHeader.biHeight)
&& ((long) format.width
== hdr->bmiHeader.biWidth))
{
mediaType = mt;
if ((pixfmt == MEDIASUBTYPE_ARGB32.Data1)
|| (pixfmt == MEDIASUBTYPE_RGB32.Data1))
bitCount = 32;
else if (pixfmt == MEDIASUBTYPE_RGB24.Data1)
bitCount = 24;
else
bitCount = hdr->bmiHeader.biBitCount;
break;
}
else
_DeleteMediaType(mt);
}
}
delete[] scc;
}
else
hr = E_OUTOFMEMORY;
}
if (mediaType)
{
hr = streamConfig->SetFormat(mediaType);
if (SUCCEEDED(hr))
{
m_bitPerPixel = bitCount;
m_format = format;
m_format.mediaType = mediaType->subtype;
}
_DeleteMediaType(mediaType);
}
else if (SUCCEEDED(hr))
hr = E_FAIL;
streamConfig->Release();
}
return hr;
}
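Example 9 matches a mode by comparing subtype.Data1 against the requested pixel format, which works because FOURCC subtypes carry the FOURCC in Data1 and the RGB subtypes have distinct Data1 values as well. A hypothetical caller, reusing the DSFormat/DSCaptureDevice types from Examples 3 and 9 (the 640x480 YUY2 choice is only an illustration):
#include <dshow.h>

// Ask the device for 640x480 YUY2, a mode most webcams advertise.
HRESULT SelectYuy2Vga(DSCaptureDevice &device)
{
    DSFormat fmt;
    fmt.width = 640;
    fmt.height = 480;
    fmt.pixelFormat = MAKEFOURCC('Y', 'U', 'Y', '2');   // == MEDIASUBTYPE_YUY2.Data1
    return device.setFormat(fmt);
}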
Example 10: if
AM_MEDIA_TYPE * CCaptureDevice::SelectMediaType(void)
{
// Preferred sequence: UYVY, YUY2, RGB565, RGB555, RGB24, RGB32
VIDEO_STREAM_CONFIG_CAPS pSCC;
AM_MEDIA_TYPE * pmt = NULL;
HRESULT hr = S_OK;
int nCounts=0, nSize=0;
int preferredIndex = -1;
enum {
UYVY = 0, YUY2, RGB565, RGB555, RGB24, RGB32, Unknown
} currentPreferred, temp;
currentPreferred = Unknown;
IAMStreamConfig * pCfg = GetStreamConfig();
pCfg->GetNumberOfCapabilities(&nCounts, &nSize);
for (int i = 0; i < nCounts; i++)
{
if (pCfg->GetStreamCaps(i, &pmt, (BYTE *)&pSCC) == S_OK)
{
if (pmt->subtype == MEDIASUBTYPE_RGB32)
{
temp = RGB32;
}
else if (pmt->subtype == MEDIASUBTYPE_RGB24)
{
temp = RGB24;
}
else if (pmt->subtype == MEDIASUBTYPE_RGB565)
{
temp = RGB565;
}
else if (pmt->subtype == MEDIASUBTYPE_RGB555)
{
temp = RGB555;
}
else if (pmt->subtype == MEDIASUBTYPE_YUY2)
{
temp = YUY2;
}
else if (pmt->subtype == MEDIASUBTYPE_UYVY)
{
temp = UYVY;
}
else
{
temp = Unknown;
}
if (temp < currentPreferred)
{
currentPreferred = temp;
preferredIndex = i;
}
DeleteMediaType(pmt);
}
}
// Get the preferred media type
if (preferredIndex != -1)
{
hr = pCfg->GetStreamCaps(preferredIndex, &pmt, (BYTE *)&pSCC);
}
else
{
hr = pCfg->GetFormat(&pmt);
}
return pmt;
}
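SelectMediaType transfers ownership of the returned AM_MEDIA_TYPE to the caller. A plausible follow-up, not taken from the original project: apply the preferred type and free it, using GetStreamConfig and DeleteMediaType exactly as the example itself does:
// Hypothetical caller: apply the preferred type, then release it.
void ApplyPreferredType(CCaptureDevice &dev)
{
    AM_MEDIA_TYPE *pmt = dev.SelectMediaType();
    if (pmt)
    {
        dev.GetStreamConfig()->SetFormat(pmt);
        DeleteMediaType(pmt);
    }
}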
Example 11: GetSupportedFormats
HRESULT CAudioCompressorFormats::GetSupportedFormats(std::vector<WAVEFORMATEX*>& listFormats)
{
CStringW swDeviceName(m_sAudComp);
HRESULT hr = m_pSysDevEnum->CreateClassEnumerator(CLSID_AudioCompressorCategory, &m_pEnumCat, 0);
if(NULL == m_pEnumCat)
return E_POINTER;
if(S_OK == hr)
{
ULONG cFetched;
while(m_pEnumCat->Next(1, &m_pMoniker, &cFetched) == S_OK)
{
IPropertyBag *pPropBag;
hr = m_pMoniker->BindToStorage(0, 0, IID_IPropertyBag,
(void **)&pPropBag);
if (SUCCEEDED(hr))
{
VARIANT varName;
VariantInit(&varName);
hr = pPropBag->Read(L"FriendlyName", &varName, 0);
if (SUCCEEDED(hr))
{
if(wcscmp(varName.bstrVal, swDeviceName.GetBuffer()) == 0) // bstrVal, not pbstrVal: FriendlyName is a plain VT_BSTR
{
m_pMoniker->AddRef();
break;
}
}
VariantClear(&varName);
pPropBag->Release();
}
m_pMoniker->Release();
}
}
if(m_pMoniker)
{
IBaseFilter *pFilter = 0;
hr = m_pMoniker->BindToObject(0, 0, IID_IBaseFilter, (void**)&pFilter);
if(SUCCEEDED(hr))
{
IEnumPins *pEnum = NULL;
hr = pFilter->EnumPins(&pEnum);
if (SUCCEEDED(hr))
{
IPin *pPin = NULL;
while(S_OK == pEnum->Next(1, &pPin, NULL))
{
IAMStreamConfig *pConf;
hr = pPin->QueryInterface(IID_IAMStreamConfig, (void**)&pConf);
if (SUCCEEDED(hr))
{
CString sFormat;
int iCount, iSize;
BYTE *pSCC = NULL;
AM_MEDIA_TYPE *pmt;
float fSample;
hr = pConf->GetNumberOfCapabilities(&iCount, &iSize);
pSCC = new BYTE[iSize];
if (pSCC == NULL)
{
return E_POINTER;
}
if (iSize == sizeof(AUDIO_STREAM_CONFIG_CAPS))
{
// Use the audio capabilities structure.
for (int iFormat = 0; iFormat < iCount; iFormat++)
{
AUDIO_STREAM_CONFIG_CAPS scc;
AM_MEDIA_TYPE *pmtConfig;
hr = pConf->GetStreamCaps(iFormat, &pmtConfig, (BYTE*)&scc);
if (SUCCEEDED(hr))
{
if(pmtConfig->formattype == FORMAT_WaveFormatEx)
{
WAVEFORMATEX *pFormat = new WAVEFORMATEX(*(reinterpret_cast<WAVEFORMATEX*>(pmtConfig->pbFormat)));
if(pFormat)
{
listFormats.push_back(pFormat);
}
FreeMediaType(*pmtConfig);
CoTaskMemFree(pmtConfig);
}
}
}
delete[] pSCC; // array new requires array delete
}
pConf->Release();
}
pPin->Release();
}
pEnum->Release();
}
pFilter->Release();
}
}
return hr;
}
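The WAVEFORMATEX entries pushed into listFormats are heap copies made with new, so the caller owns them. A matching cleanup sketch (the helper name is illustrative):
#include <windows.h>
#include <vector>

// Release the per-format copies returned by GetSupportedFormats.
void FreeFormats(std::vector<WAVEFORMATEX*> &listFormats)
{
    for (size_t i = 0; i < listFormats.size(); ++i)
        delete listFormats[i];
    listFormats.clear();
}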
Example 12: sizeof
//......... (part of the code omitted here) .........
if (pDeviceFilter != NULL)
{
// go out of loop if getchar() returns 'y'
break;
}
}
if (pDeviceFilter != NULL) {
//
// PLAY
//
// create FilterGraph
CoCreateInstance(CLSID_FilterGraph,
NULL,
CLSCTX_INPROC,
IID_IGraphBuilder,
(LPVOID *)&pGraphBuilder);
// create CaptureGraphBuilder2
CoCreateInstance(CLSID_CaptureGraphBuilder2, NULL, CLSCTX_INPROC,
IID_ICaptureGraphBuilder2,
(LPVOID *)&pCaptureGraphBuilder2);
//============================================================
//=========== MY CODE ======================================
//=============================================================
HRESULT hr = CoInitialize(0);
IAMStreamConfig *pConfig = NULL;
hr = pCaptureGraphBuilder2->FindInterface(&PIN_CATEGORY_CAPTURE, 0, pDeviceFilter, IID_IAMStreamConfig, (void**)&pConfig);
int iCount = 0, iSize = 0;
hr = pConfig->GetNumberOfCapabilities(&iCount, &iSize);
// Check the size to make sure we pass in the correct structure.
if (iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS))
{
// Use the video capabilities structure.
for (int iFormat = 0; iFormat < iCount; iFormat++)
{
VIDEO_STREAM_CONFIG_CAPS scc;
AM_MEDIA_TYPE *pmtConfig;
hr = pConfig->GetStreamCaps(iFormat, &pmtConfig, (BYTE*)&scc);
if (SUCCEEDED(hr))
{
/* Examine the format, and possibly use it. */
if ((pmtConfig->majortype == MEDIATYPE_Video) &&
(pmtConfig->subtype == MEDIASUBTYPE_RGB24) &&
(pmtConfig->formattype == FORMAT_VideoInfo) &&
(pmtConfig->cbFormat >= sizeof (VIDEOINFOHEADER)) &&
(pmtConfig->pbFormat != NULL))
{
VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)pmtConfig->pbFormat;
// pVih contains the detailed format information.
LONG lWidth = pVih->bmiHeader.biWidth;
LONG lHeight = pVih->bmiHeader.biHeight;
if( lWidth == 1280 )
// if (iFormat == 26)
{ //2 = '1280x720YUV' YUV, 22 = '1280x800YUV', 26 = '1280x720RGB'
hr = pConfig->SetFormat(pmtConfig);
}
}
// Delete the media type when you are done.
DeleteMediaType(pmtConfig);
Example 13: init
// use cameraID 1 for the first camera, 2 for the second, and so on
HRESULT VideoTexture::init(int cameraID)
{
if (cameraID <= 0) return S_FALSE;
glEnable(GL_TEXTURE_2D);
// Texture -> This will be put into the camera module
glGenTextures(1, textures); // Create The Texture
// Typical Texture Generation Using Data From The Bitmap
for (int i = 0; i < 1; i++)
{
//glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, textures[i]);
// Generate The Texture (640x480... make changeable!)
//glTexImage2D(GL_TEXTURE_2D, 0, 3, 640, 480, 0, GL_RGB, GL_UNSIGNED_BYTE, ...THe data111!!!);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); // Linear Filtering
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); // Linear Filtering
// Enable Texture Mapping
glTexImage2D(GL_TEXTURE_2D, 0, 3, TEXTURE_WIDTH, TEXTURE_HEIGHT, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
}
// Video stuff:
// Create captue graph builder:
HRESULT hr = InitCaptureGraphBuilder(&pGraph, &pBuild);
if (FAILED(hr)) return hr;
IEnumMoniker *enumerator;
hr = EnumerateDevices(CLSID_VideoInputDeviceCategory, &enumerator);
//DisplayDeviceInformation(enumerator);
// Take the first camera:
IMoniker *pMoniker = NULL;
for (int i = 0; i < cameraID; i++)
{
enumerator->Next(1, &pMoniker, NULL);
}
IBaseFilter *pCap = NULL;
hr = pMoniker->BindToObject(0, 0, IID_IBaseFilter, (void**)&pCap);
if (SUCCEEDED(hr))
{
hr = pGraph->AddFilter(pCap, L"Capture Filter");
if (FAILED(hr)) return hr;
}
else return hr;
// Create the Sample Grabber which we will use
// To take each frame for texture generation
hr = CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER,
IID_ISampleGrabber, (void **)&pGrabber);
if (FAILED(hr)) return hr;
hr = pGrabber->QueryInterface(IID_IBaseFilter, (void **)&pGrabberBase);
// We have to set the 24-bit RGB desire here
// So that the proper conversion filters
// Are added automatically.
AM_MEDIA_TYPE desiredType;
memset(&desiredType, 0, sizeof(desiredType));
desiredType.majortype = MEDIATYPE_Video;
desiredType.subtype = MEDIASUBTYPE_RGB24;
desiredType.formattype = FORMAT_VideoInfo;
pGrabber->SetMediaType(&desiredType);
pGrabber->SetBufferSamples(TRUE);
// add to Graph
pGraph->AddFilter(pGrabberBase, L"Grabber");
/* Null render filter */
hr = CoCreateInstance(CLSID_NullRenderer, NULL, CLSCTX_INPROC_SERVER, IID_IBaseFilter, (void**)&pNullRender);
if(FAILED(hr)) return hr;
pGraph->AddFilter(pNullRender, L"Render");
// Connect the graph
hr = ConnectFilters(pGraph, pCap, pGrabberBase);
if(FAILED(hr)) return hr;
hr = ConnectFilters(pGraph, pGrabberBase, pNullRender);
// Set output format of capture:
IAMStreamConfig *pConfig = NULL;
hr = pBuild->FindInterface(
&PIN_CATEGORY_CAPTURE, // Capture pin.
0, // Any media type.
pCap, // Pointer to the capture filter.
IID_IAMStreamConfig, (void**)&pConfig);
if (FAILED(hr)) return hr;
AM_MEDIA_TYPE *pmtConfig;
hr = pConfig->GetFormat(&pmtConfig);
if (FAILED(hr)) return hr;
// Try and find a good video format
int iCount = 0, iSize = 0;
hr = pConfig->GetNumberOfCapabilities(&iCount, &iSize);
// Check the size to make sure we pass in the correct structure.
if (iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS))
{
// Use the video capabilities structure.
for (int iFormat = 0; iFormat < iCount; iFormat++)
{
VIDEO_STREAM_CONFIG_CAPS scc;
AM_MEDIA_TYPE *pmtConfig;
hr = pConfig->GetStreamCaps(iFormat, &pmtConfig, (BYTE*)&scc);
if (SUCCEEDED(hr))
{
VIDEOINFOHEADER *hdr = (VIDEOINFOHEADER *)pmtConfig->pbFormat;
//......... (part of the code omitted here) .........
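The per-frame half of this example is cut off above. Since init() calls SetBufferSamples(TRUE), the natural continuation is to poll ISampleGrabber::GetCurrentBuffer and upload the RGB24 frame into the texture. The sketch below is an assumption about that missing part, not the original code (ISampleGrabber comes from the deprecated qedit.h; RGB24 frames arrive as bottom-up BGR, so the image appears vertically flipped unless corrected):
#include <dshow.h>
#include <qedit.h>      // ISampleGrabber (deprecated SDK header)
#include <gl/GL.h>
#include <vector>

// Pull the latest captured frame and update the texture created in init().
// TEXTURE_WIDTH/TEXTURE_HEIGHT are the constants used by the example.
void UpdateTexture(ISampleGrabber *pGrabber, GLuint tex)
{
    long size = 0;
    if (FAILED(pGrabber->GetCurrentBuffer(&size, NULL)) || size <= 0)
        return;                       // no frame captured yet
    std::vector<BYTE> buf(size);
    if (FAILED(pGrabber->GetCurrentBuffer(&size, (long*)buf.data())))
        return;
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, TEXTURE_WIDTH, TEXTURE_HEIGHT,
                    GL_BGR_EXT, GL_UNSIGNED_BYTE, buf.data());
}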
Example 14: CaptureVideo
HRESULT CaptureVideo()
{
HRESULT hr;
IBaseFilter *pSrcFilter=NULL;
// Get DirectShow interfaces
hr = GetInterfaces();
if (FAILED(hr))
{
Msg(TEXT("Failed to get video interfaces! hr=0x%x"), hr);
return hr;
}
// Attach the filter graph to the capture graph
hr = g_pCapture->SetFiltergraph(g_pGraph);
if (FAILED(hr))
{
Msg(TEXT("Failed to set capture filter graph! hr=0x%x"), hr);
return hr;
}
// Use the system device enumerator and class enumerator to find
// a video capture/preview device, such as a desktop USB video camera.
hr = FindCaptureDevice(&pSrcFilter);
if (FAILED(hr))
{
// Don't display a message because FindCaptureDevice will handle it
return hr;
}
// Add Capture filter to our graph.
hr = g_pGraph->AddFilter(pSrcFilter, L"Video Capture");
if (FAILED(hr))
{
Msg(TEXT("Couldn't add the capture filter to the graph! hr=0x%x\r\n\r\n")
TEXT("If you have a working video capture device, please make sure\r\n")
TEXT("that it is connected and is not being used by another application.\r\n\r\n")
TEXT("The sample will now close."), hr);
pSrcFilter->Release();
return hr;
}
// Copied code
//========================================
IAMStreamConfig *pSC;
hr = g_pCapture->FindInterface(&PIN_CATEGORY_PREVIEW,
&MEDIATYPE_Interleaved,
pSrcFilter, IID_IAMStreamConfig, (void **)&pSC);
if(FAILED(hr))
hr = g_pCapture->FindInterface(&PIN_CATEGORY_PREVIEW,
&MEDIATYPE_Video, pSrcFilter,
IID_IAMStreamConfig, (void **)&pSC);
if (!pSC) {
return hr;
}
int iCount = 0, iSize = 0;
hr = pSC->GetNumberOfCapabilities(&iCount, &iSize);
// Check the size to make sure we pass in the correct structure.
if (iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS))
{
// Use the video capabilities structure.
int i = 0;
for (int iFormat = 0; iFormat < iCount; iFormat++)
{
VIDEO_STREAM_CONFIG_CAPS scc;
AM_MEDIA_TYPE *pmtConfig;
hr = pSC->GetFormat(&pmtConfig);
VIDEOINFOHEADER *pvi = (VIDEOINFOHEADER *)pmtConfig->pbFormat;
double fps = 30;
pvi->AvgTimePerFrame = (LONGLONG)(10000000/fps);
pvi->bmiHeader.biSizeImage = DIBSIZE(pvi->bmiHeader);
pvi->bmiHeader.biWidth = 1920;
pvi->bmiHeader.biHeight = 1080;
hr = pSC->SetFormat(pmtConfig);
//hr = pSC->GetStreamCaps(iFormat, &pmtConfig, (BYTE*)&scc);
//if (SUCCEEDED(hr))
//{
// /* Examine the format, and possibly use it. */
// if (pmtConfig->formattype == FORMAT_VideoInfo) {
// long width = HEADER(pmtConfig->pbFormat)->biWidth;
// long height = HEADER(pmtConfig->pbFormat)->biHeight;
//......... (part of the code omitted here) .........
Example 15: if
std::vector<CameraConfig> videoInputCamera::getCameraConfigs(int dev_id) {
std::vector<CameraConfig> cfg_list;
int count = getDeviceCount();
if (count==0) return cfg_list;
comInit();
HRESULT hr;
ICaptureGraphBuilder2 *lpCaptureGraphBuilder;
IGraphBuilder *lpGraphBuilder;
IBaseFilter *lpInputFilter;
IAMStreamConfig *lpStreamConfig;
char nDeviceName[255];
WCHAR wDeviceName[255];
for (int cam_id=0;cam_id<count;cam_id++) {
if ((dev_id>=0) && (dev_id!=cam_id)) continue;
hr = CoCreateInstance(CLSID_CaptureGraphBuilder2, NULL, CLSCTX_INPROC_SERVER, IID_ICaptureGraphBuilder2, (void **)&lpCaptureGraphBuilder);
if (FAILED(hr)) // FAILED is a macro that tests the return value
{
printf("ERROR - Could not create the Filter Graph Manager\n");
comUnInit();
return cfg_list;
}
// Create the Filter Graph Manager.
hr = CoCreateInstance(CLSID_FilterGraph, 0, CLSCTX_INPROC_SERVER,IID_IGraphBuilder, (void**)&lpGraphBuilder);
if (FAILED(hr))
{
printf("ERROR - Could not add the graph builder!\n");
lpCaptureGraphBuilder->Release();
comUnInit();
return cfg_list;
}
hr = lpCaptureGraphBuilder->SetFiltergraph(lpGraphBuilder);
if (FAILED(hr))
{
printf("ERROR - Could not set filtergraph\n");
lpGraphBuilder->Release();
lpCaptureGraphBuilder->Release();
comUnInit();
return cfg_list;
}
memset(wDeviceName, 0, sizeof(WCHAR) * 255);
memset(nDeviceName, 0, sizeof(char) * 255);
hr = getDevice(&lpInputFilter, cam_id, wDeviceName, nDeviceName);
if (SUCCEEDED(hr)){
hr = lpGraphBuilder->AddFilter(lpInputFilter, wDeviceName);
}else{
printf("ERROR - Could not find specified video device\n");
lpGraphBuilder->Release();
lpCaptureGraphBuilder->Release();
comUnInit();
return cfg_list;
}
hr = lpCaptureGraphBuilder->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video, lpInputFilter, IID_IAMStreamConfig, (void **)&lpStreamConfig);
if(FAILED(hr)){
printf("ERROR: Couldn't config the stream!\n");
lpInputFilter->Release();
lpGraphBuilder->Release();
lpCaptureGraphBuilder->Release();
comUnInit();
return cfg_list;
}
CameraConfig cam_cfg;
CameraTool::initCameraConfig(&cam_cfg);
cam_cfg.driver = DRIVER_DEFAULT;
cam_cfg.device = cam_id;
sprintf(cam_cfg.name, "%s", nDeviceName);
int iCount = 0;
int iSize = 0;
hr = lpStreamConfig->GetNumberOfCapabilities(&iCount, &iSize);
std::vector<CameraConfig> fmt_list;
if (iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS))
{
GUID lastFormat = MEDIASUBTYPE_None;
for (int iFormat = 0; iFormat < iCount; iFormat+=2)
{
VIDEO_STREAM_CONFIG_CAPS scc;
AM_MEDIA_TYPE *pmtConfig;
hr = lpStreamConfig->GetStreamCaps(iFormat, &pmtConfig, (BYTE*)&scc);
if (SUCCEEDED(hr)){
if ( pmtConfig->subtype != lastFormat) {
if (fmt_list.size()>0) {
std::sort(fmt_list.begin(), fmt_list.end());
cfg_list.insert( cfg_list.end(), fmt_list.begin(), fmt_list.end() );
fmt_list.clear();
//......... (part of the code omitted here) .........