This article collects typical usage examples of the C++ IDepthFrame class. If you are wondering what IDepthFrame is for, how to use it, or are looking for concrete C++ IDepthFrame examples, the selected code samples below may help.
The following presents 15 code examples of the IDepthFrame class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
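All of the examples share the same basic pattern: acquire an IDepthFrame from a reader (or from a frame reference), read its data with AccessUnderlyingBuffer or CopyFrameDataToArray, and release the frame. Below is a minimal sketch of that pattern; pDepthReader and depthBuffer are placeholder names for an already-opened IDepthFrameReader and a pre-allocated 512x424 UINT16 array, not identifiers taken from any of the examples.
// Minimal acquire/use/release sketch.
IDepthFrame* pFrame = nullptr;
if (SUCCEEDED(pDepthReader->AcquireLatestFrame(&pFrame)))
{
    UINT capacity = 0;
    UINT16* pRaw = nullptr;
    // Zero-copy access; the pointer is only valid until the frame is released.
    if (SUCCEEDED(pFrame->AccessUnderlyingBuffer(&capacity, &pRaw)))
    {
        // Use pRaw[0 .. capacity-1] here, or copy it out instead with
        // pFrame->CopyFrameDataToArray(512 * 424, depthBuffer);
    }
    // Always release the frame, otherwise the reader stops delivering new frames.
    pFrame->Release();
}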
Example 1: glBindTexture
// Get the depth data
GLuint KinectV2::getDepth() const
{
// Bind the depth texture
glBindTexture(GL_TEXTURE_2D, depthTexture);
// If the next depth frame has arrived
IDepthFrame *depthFrame;
if (depthReader->AcquireLatestFrame(&depthFrame) == S_OK)
{
// Get the size and storage location of the depth data
UINT depthSize;
UINT16 *depthBuffer;
depthFrame->AccessUnderlyingBuffer(&depthSize, &depthBuffer);
// Compute the color texture coordinates and transfer them to the buffer object
glBindBuffer(GL_ARRAY_BUFFER, coordBuffer);
ColorSpacePoint *const texcoord(static_cast<ColorSpacePoint *>(glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY)));
coordinateMapper->MapDepthFrameToColorSpace(depthCount, depthBuffer, depthCount, texcoord);
glUnmapBuffer(GL_ARRAY_BUFFER);
// Transfer the depth data to the texture
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, depthWidth, depthHeight, GL_RED, GL_UNSIGNED_SHORT, depthBuffer);
// Release the depth frame
depthFrame->Release();
}
return depthTexture;
}
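The depthTexture and coordBuffer objects used above are created elsewhere in the class; the sketch below shows one plausible way to set them up. The names follow the example, but the formats and sizes are assumptions: GL_R16 is chosen to match the GL_RED / GL_UNSIGNED_SHORT upload in getDepth(), and depthCount is assumed to be depthWidth * depthHeight.
// Hypothetical one-time setup for depthTexture and coordBuffer.
glGenTextures(1, &depthTexture);
glBindTexture(GL_TEXTURE_2D, depthTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R16, depthWidth, depthHeight, 0, GL_RED, GL_UNSIGNED_SHORT, nullptr);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glGenBuffers(1, &coordBuffer);
glBindBuffer(GL_ARRAY_BUFFER, coordBuffer);
// One ColorSpacePoint (two floats) per depth pixel, rewritten every frame by getDepth().
glBufferData(GL_ARRAY_BUFFER, depthCount * sizeof(ColorSpacePoint), nullptr, GL_DYNAMIC_DRAW);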
Example 2: GetDepthFrame
void KinectCapture::GetDepthFrame(IMultiSourceFrame* pMultiFrame)
{
IDepthFrameReference* pDepthFrameReference = NULL;
IDepthFrame* pDepthFrame = NULL;
HRESULT hr = pMultiFrame->get_DepthFrameReference(&pDepthFrameReference);
if (SUCCEEDED(hr))
{
hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);
}
if (SUCCEEDED(hr))
{
if (pDepth == NULL)
{
IFrameDescription* pFrameDescription = NULL;
hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
pFrameDescription->get_Width(&nDepthFrameWidth);
pFrameDescription->get_Height(&nDepthFrameHeight);
pDepth = new UINT16[nDepthFrameHeight * nDepthFrameWidth];
SafeRelease(pFrameDescription);
}
UINT nBufferSize = nDepthFrameHeight * nDepthFrameWidth;
hr = pDepthFrame->CopyFrameDataToArray(nBufferSize, pDepth);
}
SafeRelease(pDepthFrame);
SafeRelease(pDepthFrameReference);
}
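The SafeRelease helper used here and in the later examples is not shown in any of the snippets. It is the small template from the Kinect SDK samples; a typical definition (individual projects may differ slightly) is:
// Release a COM interface pointer and reset it to NULL; does nothing if it is already NULL.
template<class Interface>
inline void SafeRelease(Interface*& pInterfaceToRelease)
{
    if (pInterfaceToRelease != NULL)
    {
        pInterfaceToRelease->Release();
        pInterfaceToRelease = NULL;
    }
}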
Example 3: idle
void idle()
{
// Read color data
IColorFrame* pCFrame = nullptr;
if (pColorFrameReader->AcquireLatestFrame(&pCFrame) == S_OK)
{
pCFrame->CopyConvertedFrameDataToArray(uColorBufferSize, pColorBuffer, ColorImageFormat_Rgba);
pCFrame->Release();
pCFrame = nullptr;
}
// Read depth data
IDepthFrame* pDFrame = nullptr;
if (pDepthFrameReader->AcquireLatestFrame(&pDFrame) == S_OK)
{
pDFrame->CopyFrameDataToArray(uDepthPointNum, pDepthBuffer);
pDFrame->Release();
pDFrame = nullptr;
// map to camera space
pCoordinateMapper->MapColorFrameToCameraSpace(uDepthPointNum, pDepthBuffer, uColorPointNum, pCSPoints);
}
}
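The globals used by idle() (the readers, the coordinate mapper, the buffers and the point counts) are initialized elsewhere. A sketch of how the buffers might be sized, assuming the standard 1920x1080 color and 512x424 depth resolutions; the values and types are assumptions, not taken from the original program.
// Hypothetical buffer setup matching the names used in idle().
UINT uDepthPointNum = 512 * 424;                            // depth pixels
UINT uColorPointNum = 1920 * 1080;                          // color pixels
UINT uColorBufferSize = uColorPointNum * 4 * sizeof(BYTE);  // RGBA, one byte per channel
UINT16* pDepthBuffer = new UINT16[uDepthPointNum];
BYTE* pColorBuffer = new BYTE[uColorBufferSize];
CameraSpacePoint* pCSPoints = new CameraSpacePoint[uColorPointNum]; // one 3D point per color pixel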
Example 4: update
cv::Mat capKinect::update(cv::Mat& depth_show)
{
if (!m_pDepthReader) return cv::Mat();
IDepthFrame* pDepthFrame = NULL;
HRESULT hr = m_pDepthReader->AcquireLatestFrame(&pDepthFrame);
cv::Mat re;
if (SUCCEEDED(hr))
{
IFrameDescription* pFrameDescription = NULL;
int nWidth = 0;
int nHeight = 0;
USHORT nDepthMinReliableDistance = 0;
USHORT nDepthMaxDistance = 0;
UINT nBufferSize = 0;
UINT16 *pBuffer = NULL;
if (SUCCEEDED(hr))
{
hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Width(&nWidth);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Height(&nHeight);
}
if (SUCCEEDED(hr))
{
hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
}
if (SUCCEEDED(hr))
{
// In order to see the full range of depth (including the less reliable far field depth)
// we are setting nDepthMaxDistance to the extreme potential depth threshold
nDepthMaxDistance = USHRT_MAX; // keep the full 16-bit range; lower this value (e.g. to 1000 for 1 m) to cut off the background
// Note: If you wish to filter by reliable depth distance, uncomment the following line.
//// hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxDistance);
}
if (SUCCEEDED(hr))
{
hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
}
if (SUCCEEDED(hr))
{
re=capture(pBuffer, nWidth, nHeight, depth_show, nDepthMinReliableDistance, nDepthMaxDistance);
}
if(pFrameDescription)SafeRelease(pFrameDescription);
}
if(pDepthFrame)SafeRelease(pDepthFrame);
return re;
}
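The capture() member called above is not part of this excerpt. A hypothetical minimal implementation is sketched below, only to illustrate how the min/max distances might be used; the actual signature and behaviour in capKinect may differ.
// Hypothetical capKinect::capture: keeps a copy of the raw 16-bit depth and builds an 8-bit preview.
cv::Mat capKinect::capture(UINT16* pBuffer, int nWidth, int nHeight, cv::Mat& depth_show,
                           USHORT nMinDepth, USHORT nMaxDepth)
{
    cv::Mat depth16(nHeight, nWidth, CV_16UC1, pBuffer);
    cv::Mat result = depth16.clone(); // deep copy: pBuffer becomes invalid once the frame is released
    depth_show = cv::Mat::zeros(nHeight, nWidth, CV_8UC1);
    for (int i = 0; i < nHeight * nWidth; ++i)
    {
        USHORT d = pBuffer[i];
        // Scale the reliable range to 0..255; everything outside it becomes 0 (black).
        depth_show.data[i] = (d >= nMinDepth && d <= nMaxDepth)
            ? static_cast<BYTE>((d - nMinDepth) * 255 / (nMaxDepth - nMinDepth + 1))
            : 0;
    }
    return result;
}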
Example 5: getFrameData
bool KinectInterface::getFrameData(IMultiSourceFrame* frame, cv::Mat& intensity_mat, cv::Mat& depth_mat, cv::Mat& pos_mat) {
//Obtain depth frame
IDepthFrame* depthframe = nullptr;
if (FAILED(depthFrameReader->AcquireLatestFrame(&depthframe))) return false;
if (!depthframe) return false;
// Get data from frame
unsigned int sz;
unsigned short* buf;
if (FAILED(depthframe->AccessUnderlyingBuffer(&sz, &buf))) { depthframe->Release(); return false; }
//get depth -> xyz mapping
if (FAILED(mapper->MapDepthFrameToCameraSpace(width*height, buf, width*height, depth2xyz))) { depthframe->Release(); return false; }
//get depth -> rgb image mapping
if (FAILED(mapper->MapDepthFrameToColorSpace(width*height, buf, width*height, depth2rgb))) { depthframe->Release(); return false; }
//save depth; bail out if the copy fails
if (FAILED(depthframe->CopyFrameDataToArray(height * width, depth_data))) { depthframe->Release(); return false; }
if (depthframe) depthframe->Release();
//Obtain RGB frame
IColorFrame* colorframe;
if (FAILED(colorFrameReader->AcquireLatestFrame(&colorframe))) return false;
if (!colorframe) return false;
// Get data from frame
if (FAILED(colorframe->CopyConvertedFrameDataToArray(colorwidth*colorheight * 4, rgbimage, ColorImageFormat_Rgba))) { colorframe->Release(); return false; }
cv::Mat tmp_depth = cv::Mat::zeros(colorheight, colorwidth, CV_16UC1);
cv::Mat tmp_pos = cv::Mat::zeros(colorheight, colorwidth, CV_32FC3);
cv::Mat depth_org(height, width, CV_16UC1, depth_data);
cv::Mat tmp_rgb(colorheight, colorwidth, CV_8UC4, rgbimage);
// Write color array for vertices
for (int i = 0; i < width*height; i++) {
ColorSpacePoint p = depth2rgb[i];
int iY = (int)(p.Y + 0.5);
int iX = (int)(p.X + 0.5);
if (iX >= 0 && iY >= 0 && iX < colorwidth && iY < colorheight) {
// Check if color pixel coordinates are in bounds
tmp_depth.at<unsigned short>(iY, iX) = depth_data[i];
//tmp_pos.at<float>(iY, iX, 0) = depth2xyz[i].X;
//tmp_pos.at<float>(iY, iX, 1) = depth2xyz[i].Y;
//tmp_pos.at<float>(iY, iX, 2) = depth2xyz[i].Z;
}
}
if (colorframe) colorframe->Release();
cv::resize(tmp_rgb(cv::Rect(240, 0, 1440, 1080)), intensity_mat, cv::Size(640, 480));
cv::resize(tmp_depth(cv::Rect(240, 0, 1440, 1080)), depth_mat, cv::Size(640, 480));
cv::resize(tmp_pos(cv::Rect(240, 0, 1440, 1080)), pos_mat, cv::Size(640, 480));
cv::cvtColor(intensity_mat, intensity_mat, CV_RGBA2GRAY);
return true;
}
Example 6: SafeRelease
bool ms_kinect2::acquire_depth_frame(const _OPENNUI byte* dst)
{
bool result = false;
IDepthFrame* pDepthFrame = NULL;
static unsigned int bufferSize = 512 * 424;
HRESULT hResult = S_OK;
hResult = pDepthReader->AcquireLatestFrame(&pDepthFrame);
if (SUCCEEDED(hResult))
{
hResult = pDepthFrame->CopyFrameDataToArray(bufferSize, (UINT16*)dst);
if (SUCCEEDED(hResult))
result = true;
}
SafeRelease(pDepthFrame);
return result;
}
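A possible call site for acquire_depth_frame is sketched below. It assumes the _OPENNUI byte type is a plain unsigned char, that device is an initialized ms_kinect2 instance, and that the caller owns a 512x424 depth buffer; all of these are assumptions, not taken from the OpenNUI sources.
// Hypothetical usage of ms_kinect2::acquire_depth_frame (requires <vector>).
std::vector<unsigned char> frame(512 * 424 * sizeof(UINT16));
if (device.acquire_depth_frame(frame.data()))
{
    const UINT16* depth = reinterpret_cast<const UINT16*>(frame.data());
    // depth[0 .. 512*424 - 1] now holds the latest depth values in millimetres.
}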
Example 7: Mat
void Microsoft2Grabber::DepthFrameArrived(IDepthFrameReference* pDepthFrameReference) {
IDepthFrame* pDepthFrame = NULL;
HRESULT hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);
//HRESULT hr = pDepthFrameReference->AcquireLatestFrame(&pDepthFrame);
if(FAILED(hr))
return;
//cout << "got a depth frame" << endl;
INT64 nDepthTime = 0;
IFrameDescription* pDepthFrameDescription = NULL;
int nDepthWidth = 0;
int nDepthHeight = 0;
UINT nDepthBufferSize = 0;
// get depth frame data
hr = pDepthFrame->get_RelativeTime(&nDepthTime);
if (SUCCEEDED(hr)) {
hr = pDepthFrame->get_FrameDescription(&pDepthFrameDescription);
}
if (SUCCEEDED(hr)) {
hr = pDepthFrameDescription->get_Width(&nDepthWidth);
}
if (SUCCEEDED(hr)) {
hr = pDepthFrameDescription->get_Height(&nDepthHeight);
}
if (SUCCEEDED(hr)) {
hr = pDepthFrame->AccessUnderlyingBuffer(&nDepthBufferSize, &m_pDepthBuffer);
//WaitForSingleObject(hDepthMutex,INFINITE);
Mat tmp = Mat(m_depthSize, DEPTH_PIXEL_TYPE, m_pDepthBuffer, Mat::AUTO_STEP);
Mat cloned = tmp.clone(); // deep copy so the data outlives the underlying frame buffer
MatDepth depth_img = *((MatDepth*)&cloned);
m_depthTime = nDepthTime;
if (depth_image_signal_->num_slots () > 0) {
depth_image_signal_->operator()(depth_img);
}
if (num_slots<sig_cb_microsoft_point_cloud_rgba>() > 0 || all_data_signal_->num_slots() > 0 || image_depth_image_signal_->num_slots() > 0) {
//rgb_sync_.add1 (depth_img, m_depthTime);
imageDepthOnlyImageCallback(depth_img);
}
//ReleaseMutex(hDepthMutex);
}
SafeRelease(pDepthFrameDescription);
SafeRelease(pDepthFrame);
}
Example 8: while
void pcl::Kinect2Grabber::threadFunction()
{
while (!quit){
boost::unique_lock<boost::mutex> lock(mutex);
// Acquire Latest Color Frame
IColorFrame* colorFrame = nullptr;
result = colorReader->AcquireLatestFrame(&colorFrame);
if (SUCCEEDED(result)){
// Retrieved Color Data
result = colorFrame->CopyConvertedFrameDataToArray(colorBuffer.size() * sizeof(RGBQUAD), reinterpret_cast<BYTE*>(&colorBuffer[0]), ColorImageFormat::ColorImageFormat_Bgra);
if (FAILED(result)){
throw std::exception("Exception : IColorFrame::CopyConvertedFrameDataToArray()");
}
}
SafeRelease(colorFrame);
// Acquire Latest Depth Frame
IDepthFrame* depthFrame = nullptr;
result = depthReader->AcquireLatestFrame(&depthFrame);
if (SUCCEEDED(result)){
// Retrieved Depth Data
result = depthFrame->CopyFrameDataToArray(depthBuffer.size(), &depthBuffer[0]);
if (FAILED(result)){
throw std::exception("Exception : IDepthFrame::CopyFrameDataToArray()");
}
}
SafeRelease(depthFrame);
lock.unlock();
if (signal_PointXYZ->num_slots() > 0) {
signal_PointXYZ->operator()(convertDepthToPointXYZ(&depthBuffer[0]));
}
if (signal_PointXYZRGB->num_slots() > 0) {
signal_PointXYZRGB->operator()(convertRGBDepthToPointXYZRGB(&colorBuffer[0], &depthBuffer[0]));
}
if (signal_PointXYZI->num_slots() > 0) {
signal_PointXYZI->operator()(convertRGBDepthToPointXYZI(&colorBuffer[0], &depthBuffer[0]));
}
}
}
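convertDepthToPointXYZ and the RGB/I variants are not included in this excerpt. The sketch below shows one way the XYZ conversion could be written, assuming the grabber holds an ICoordinateMapper* member named mapper and the usual 512x424 depth resolution; both are assumptions based on the other examples on this page, not the actual class definition.
// Hypothetical depth-to-point-cloud conversion (assumes <vector>, pcl/point_cloud.h,
// pcl/point_types.h and an ICoordinateMapper* member called mapper are available).
pcl::PointCloud<pcl::PointXYZ>::Ptr pcl::Kinect2Grabber::convertDepthToPointXYZ(UINT16* depthBuffer)
{
    const int width = 512, height = 424;
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>());
    cloud->width = width;
    cloud->height = height;
    cloud->is_dense = false;
    cloud->points.resize(width * height);
    // Map the whole depth frame into camera space in one call, then copy into the cloud.
    std::vector<CameraSpacePoint> cameraPoints(width * height);
    if (SUCCEEDED(mapper->MapDepthFrameToCameraSpace(width * height, depthBuffer,
                                                     width * height, cameraPoints.data())))
    {
        for (int i = 0; i < width * height; ++i)
        {
            cloud->points[i].x = cameraPoints[i].X;
            cloud->points[i].y = cameraPoints[i].Y;
            cloud->points[i].z = cameraPoints[i].Z;
        }
    }
    return cloud;
}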
Example 9: getDepthData
void MKinect::getDepthData(IMultiSourceFrame* frame, float* dest) {
IDepthFrame* depthframe = nullptr;
IDepthFrameReference* frameref = NULL;
frame->get_DepthFrameReference(&frameref);
frameref->AcquireFrame(&depthframe);
if (frameref) frameref->Release();
if (!depthframe) return;
// Get data from frame
unsigned int sz;
UINT16* buf = nullptr;
if (FAILED(depthframe->AccessUnderlyingBuffer(&sz, &buf))) {
depthframe->Release();
return;
}
HRESULT res = mapper->MapDepthFrameToCameraSpace(
sz, buf, // number of depth points and the depth frame data
sz, depth2xyz); // output CameraSpacePoint array, one point per depth pixel
// Process depth frame data...
if (depthframe) depthframe->Release();
}
Example 10: update
void KinectHDFaceGrabber::update()
{
if (!m_pColorFrameReader || !m_pDepthFrameReader || !m_pBodyFrameReader){
return;
}
IColorFrame* pColorFrame = nullptr;
HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);
IDepthFrame* depthFrame = nullptr;
if (SUCCEEDED(hr)){
hr = m_pDepthFrameReader->AcquireLatestFrame(&depthFrame);
}
if (SUCCEEDED(hr)){
ColorImageFormat imageFormat = ColorImageFormat_None;
if (SUCCEEDED(hr)){
hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
}
if (SUCCEEDED(hr)){
UINT nBufferSize = m_colorWidth * m_colorHeight * sizeof(RGBQUAD);
hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(m_colorBuffer.data()), ColorImageFormat_Bgra);
}
if (SUCCEEDED(hr)){
hr = depthFrame->CopyFrameDataToArray(m_depthBuffer.size(), &m_depthBuffer[0]);
}
if (SUCCEEDED(hr)){
renderColorFrameAndProcessFaces();
}
}
SafeRelease(depthFrame);
SafeRelease(pColorFrame);
}
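m_colorBuffer and m_depthBuffer are used above as contiguous containers (data(), size(), &[0]); a plausible member setup, assuming std::vector members and the standard sensor resolutions (assumptions, not taken from the original class), is sketched here.
// Hypothetical members of KinectHDFaceGrabber:
//   std::vector<RGBQUAD> m_colorBuffer;
//   std::vector<UINT16>  m_depthBuffer;
//   int m_colorWidth, m_colorHeight;
// and their sizing, e.g. in the constructor:
m_colorWidth  = 1920;
m_colorHeight = 1080;
m_colorBuffer.resize(m_colorWidth * m_colorHeight); // one RGBQUAD per color pixel
m_depthBuffer.resize(512 * 424);                    // one UINT16 per depth pixel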
Example 11: GetDepthImageData
HRESULT KinectHandler::GetDepthImageData(RGBQUAD* &dest)
{
if (!m_pMultiFrameReader)
{
cout << "No frame reader!" << endl;
return E_FAIL;
}
IDepthFrame* pDepthFrame = NULL;
IMultiSourceFrame* pMultiSourceFrame = NULL;
HRESULT hr = m_pMultiFrameReader->AcquireLatestFrame(&pMultiSourceFrame);
if (SUCCEEDED(hr))
{
IDepthFrameReference* pDepthFrameReference = NULL;
hr = pMultiSourceFrame->get_DepthFrameReference(&pDepthFrameReference);
if (SUCCEEDED(hr))
{
hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);
}
SafeRelease(pDepthFrameReference);
}
if (SUCCEEDED(hr))
{
INT64 nTime = 0;
IFrameDescription* pDepthFrameDescription = NULL;
int nDepthWidth = 0;
int nDepthHeight = 0;
USHORT nDepthMinReliableDistance = 0;
USHORT nDepthMaxDistance = 0;
UINT nDepthBufferSize = 0;
UINT16 *pDepthBuffer = NULL;
hr = pDepthFrame->get_RelativeTime(&nTime);
if (SUCCEEDED(hr))
{
hr = pDepthFrame->get_FrameDescription(&pDepthFrameDescription);
}
if (SUCCEEDED(hr))
{
hr = pDepthFrameDescription->get_Width(&nDepthWidth);
}
if (SUCCEEDED(hr))
{
hr = pDepthFrameDescription->get_Height(&nDepthHeight);
}
if (SUCCEEDED(hr))
{
hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
}
if (SUCCEEDED(hr))
{
// In order to see the full range of depth (including the less reliable far field depth)
// we are setting nDepthMaxDistance to the extreme potential depth threshold
nDepthMaxDistance = USHRT_MAX;
// Note: If you wish to filter by reliable depth distance, uncomment the following line.
//// hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxDistance);
}
if (SUCCEEDED(hr))
{
hr = pDepthFrame->AccessUnderlyingBuffer(&nDepthBufferSize, &pDepthBuffer);
}
if (SUCCEEDED(hr))
{
//RGBQUAD* pRGBX = new RGBQUAD[cDepthWidth * cDepthHeight];
// end pixel is start + width*height - 1
cout << "w:" << nDepthWidth << " h:" << nDepthHeight << endl;
cout << "buffersize:" << nDepthBufferSize << endl;
const UINT16* pBufferEnd = pDepthBuffer + (nDepthWidth * nDepthHeight);
RGBQUAD* auxiliar = m_pDepthRGBX;
//const UINT16* pBufferEnd = pDepthBuffer + (640 * 480);
cout << "bufferLocation:" << pDepthBuffer << endl;
cout << "bufferend:" << pBufferEnd << endl;
int counter = 0;
while (pDepthBuffer < pBufferEnd)
{
//cout << "now:" << pDepthBuffer << " end:" << pBufferEnd << endl;
USHORT depth = *pDepthBuffer;
//cout << "now:" << pDepthBuffer << " end:" << pBufferEnd << endl;
// To convert to a byte, we're discarding the most-significant
// rather than least-significant bits.
// We're preserving detail, although the intensity will "wrap."
// Values outside the reliable depth range are mapped to 0 (black).
// Note: Using conditionals in this loop could degrade performance.
// Consider using a lookup table instead when writing production code.
BYTE intensity = static_cast<BYTE>((depth >= nDepthMinReliableDistance) && (depth <= nDepthMaxDistance) ? (depth % 256) : 0);
auxiliar->rgbBlue = intensity;
//......... (remaining code omitted) .........
Example 12: main
int main(int argc, char** argv)
{
// 1a. Get default Sensor
cout << "Try to get default sensor" << endl;
IKinectSensor* pSensor = nullptr;
if (GetDefaultKinectSensor(&pSensor) != S_OK)
{
cerr << "Get Sensor failed" << endl;
}
else
{
// 1b. Open sensor
cout << "Try to open sensor" << endl;
if (pSensor->Open() != S_OK)
{
cerr << "Can't open sensor" << endl;
}
else
{
// 2a. Get frame source
cout << "Try to get source" << endl;
IDepthFrameSource* pFrameSource = nullptr;
if (pSensor->get_DepthFrameSource(&pFrameSource) != S_OK)
{
cerr << "Can't get frame source" << endl;
}
else
{
// 2b. Get frame description
int iWidth = 0;
int iHeight = 0;
IFrameDescription* pFrameDescription = nullptr;
if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK)
{
pFrameDescription->get_Width(&iWidth);
pFrameDescription->get_Height(&iHeight);
pFrameDescription->Release();
pFrameDescription = nullptr;
}
// 2c. get some depth-only metadata
UINT16 uDepthMin = 0, uDepthMax = 0;
pFrameSource->get_DepthMinReliableDistance(&uDepthMin);
pFrameSource->get_DepthMaxReliableDistance(&uDepthMax);
cout << "Reliable Distance: " << uDepthMin << " - " << uDepthMax << endl;
// prepare OpenCV
cv::Mat mDepthImg(iHeight, iWidth, CV_16UC1);
cv::Mat mImg8bit(iHeight, iWidth, CV_8UC1);
cv::namedWindow( "Depth Map" );
// 3a. get frame reader
cout << "Try to get frame reader" << endl;
IDepthFrameReader* pFrameReader = nullptr;
if (pFrameSource->OpenReader(&pFrameReader) != S_OK)
{
cerr << "Can't get frame reader" << endl;
}
else
{
// Enter main loop
cout << "Enter main loop" << endl;
while (true)
{
// 4a. Get last frame
IDepthFrame* pFrame = nullptr;
if (pFrameReader->AcquireLatestFrame(&pFrame) == S_OK)
{
// 4c. copy the depth map to image
if (pFrame->CopyFrameDataToArray(iWidth * iHeight, reinterpret_cast<UINT16*>(mDepthImg.data)) == S_OK)
{
// 4d. convert from 16bit to 8bit
mDepthImg.convertTo(mImg8bit, CV_8U, 255.0f / uDepthMax);
cv::imshow("Depth Map", mImg8bit);
}
else
{
cerr << "Data copy error" << endl;
}
// 4e. release frame
pFrame->Release();
}
// 4f. check keyboard input
if (cv::waitKey(30) == VK_ESCAPE){
break;
}
}
// 3b. release frame reader
cout << "Release frame reader" << endl;
pFrameReader->Release();
pFrameReader = nullptr;
}
// 2d. release Frame source
cout << "Release frame source" << endl;
pFrameSource->Release();
pFrameSource = nullptr;
//......... (remaining code omitted) .........
Example 13: capture
void capture(Image::Ptr& pImage)
{
HRESULT hr;
if (m_pMultiSourceFrameReader==nullptr)
{
camera->getContext().error("CameraKinectDevice::capture: m_pMultiSourceFrameReader is nullptr\n");
// this is bad news - perhaps throw?
return; // @@@
}
IMultiSourceFrame* pMultiSourceFrame = nullptr;
IDepthFrame* pDepthFrame = nullptr;
IColorFrame* pColorFrame = nullptr;
const golem::MSecTmU32 waitStep = 1;
golem::MSecTmU32 timeWaited = 0;
golem::Sleep timer;
while (FAILED(hr = m_pMultiSourceFrameReader->AcquireLatestFrame(&pMultiSourceFrame)))
{
// this is in CameraOpenNI, but suspect may be causing problem here
// if (camera->isTerminating()) return;
timer.msleep(waitStep);
timeWaited += waitStep;
if (timeWaited >= timeout)
{
camera->getContext().error("CameraKinectDevice::capture: failed to acquire frame within %d ms\n", timeout);
// keep going - don't return with nothing; reset stopwatch @@@
timeWaited = 0;
}
}
const golem::SecTmReal systemTime1 = camera->getContext().getTimer().elapsed();
if (SUCCEEDED(hr))
{
IDepthFrameReference* pDepthFrameReference = nullptr;
hr = pMultiSourceFrame->get_DepthFrameReference(&pDepthFrameReference);
if (SUCCEEDED(hr))
{
hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);
}
RELEASE_PTR(pDepthFrameReference);
}
if (SUCCEEDED(hr))
{
IColorFrameReference* pColorFrameReference = nullptr;
hr = pMultiSourceFrame->get_ColorFrameReference(&pColorFrameReference);
if (SUCCEEDED(hr))
{
hr = pColorFrameReference->AcquireFrame(&pColorFrame);
}
RELEASE_PTR(pColorFrameReference);
}
if (SUCCEEDED(hr))
{
INT64 nDepthTime = 0;
IFrameDescription* pDepthFrameDescription = nullptr;
int nDepthWidth = 0;
int nDepthHeight = 0;
UINT nDepthBufferSize = 0;
UINT16 *pDepthBuffer = nullptr;
IFrameDescription* pColorFrameDescription = nullptr;
int nColorWidth = 0;
int nColorHeight = 0;
ColorImageFormat imageFormat = ColorImageFormat_None;
UINT nColorBufferSize = 0;
RGBQUAD *pColorBuffer = nullptr;
// get depth frame data
hr = pDepthFrame->get_RelativeTime(&nDepthTime);
if (SUCCEEDED(hr))
hr = pDepthFrame->get_FrameDescription(&pDepthFrameDescription);
if (SUCCEEDED(hr))
hr = pDepthFrameDescription->get_Width(&nDepthWidth);
if (SUCCEEDED(hr))
hr = pDepthFrameDescription->get_Height(&nDepthHeight);
if (SUCCEEDED(hr))
hr = pDepthFrame->AccessUnderlyingBuffer(&nDepthBufferSize, &pDepthBuffer);
// get color frame data
if (SUCCEEDED(hr))
hr = pColorFrame->get_FrameDescription(&pColorFrameDescription);
if (SUCCEEDED(hr))
hr = pColorFrameDescription->get_Width(&nColorWidth);
if (SUCCEEDED(hr))
//......... (remaining code omitted) .........
Example 14: main
int main()
{
// name and position windows
cvNamedWindow("Color Probabilistic Tracking - Samples", 1);
cvMoveWindow("Color Probabilistic Tracking - Samples", 0, 0);
cvNamedWindow("Color Probabilistic Tracking - Result", 1);
cvMoveWindow("Color Probabilistic Tracking - Result", 1000, 0);
//control mouse
setMouseCallback("Color Probabilistic Tracking - Samples", onMouse, 0);
cv::setUseOptimized(true);
// Sensor
IKinectSensor* pSensor;
HRESULT hResult = S_OK;
hResult = GetDefaultKinectSensor(&pSensor);
if (FAILED(hResult)) {
std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
return -1;
}
hResult = pSensor->Open();
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::Open()" << std::endl;
return -1;
}
// Source
IColorFrameSource* pColorSource;
hResult = pSensor->get_ColorFrameSource(&pColorSource);
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::get_ColorFrameSource()" << std::endl;
return -1;
}
IDepthFrameSource* pDepthSource;
hResult = pSensor->get_DepthFrameSource(&pDepthSource);
if (FAILED(hResult)) {
std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl;
return -1;
}
/*IBodyIndexFrameSource* pBodyIndexSource;
hResult = pSensor->get_BodyIndexFrameSource(&pBodyIndexSource);*/
// Reader
IColorFrameReader* pColorReader;
hResult = pColorSource->OpenReader(&pColorReader);
if (FAILED(hResult)) {
std::cerr << "Error : IColorFrameSource::OpenReader()" << std::endl;
return -1;
}
IDepthFrameReader* pDepthReader;
hResult = pDepthSource->OpenReader(&pDepthReader);
if (FAILED(hResult)) {
std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl;
return -1;
}
//IBodyIndexFrameReader* pBodyIndexReader;//saferealease
//hResult = pBodyIndexSource->OpenReader(&pBodyIndexReader);
// Description
IFrameDescription* pColorDescription;
hResult = pColorSource->get_FrameDescription(&pColorDescription);
if (FAILED(hResult)) {
std::cerr << "Error : IColorFrameSource::get_FrameDescription()" << std::endl;
return -1;
}
int colorWidth = 0;
int colorHeight = 0;
pColorDescription->get_Width(&colorWidth); // 1920
pColorDescription->get_Height(&colorHeight); // 1080
unsigned int colorBufferSize = colorWidth * colorHeight * 4 * sizeof(unsigned char);
cv::Mat colorBufferMat(colorHeight, colorWidth, CV_8UC4);
cv::Mat colorMat(colorHeight / 2, colorWidth / 2, CV_8UC4);
cv::namedWindow("Color");
RGBQUAD* m_pDepthRGBX;
m_pDepthRGBX = new RGBQUAD[512 * 424]; // heap storage for the RGBX pixels used to visualize the depth image
IFrameDescription* pDepthDescription;
hResult = pDepthSource->get_FrameDescription(&pDepthDescription);
if (FAILED(hResult)) {
std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
return -1;
}
int depthWidth = 0;
int depthHeight = 0;
pDepthDescription->get_Width(&depthWidth); // 512
pDepthDescription->get_Height(&depthHeight); // 424
unsigned int depthBufferSize = depthWidth * depthHeight * sizeof(unsigned short);
cv::Mat depthBufferMat(depthHeight, depthWidth, CV_16UC1);
//......... (remaining code omitted) .........
Example 15: Update
/// <summary>
/// Main processing function
/// </summary>
void CCoordinateMappingBasics::Update()
{
if (!m_pMultiSourceFrameReader)
{
return;
}
IMultiSourceFrame* pMultiSourceFrame = NULL;
IDepthFrame* pDepthFrame = NULL;
IColorFrame* pColorFrame = NULL;
IBodyIndexFrame* pBodyIndexFrame = NULL;
IBodyFrame* pBodyFrame = NULL;
HRESULT hr = m_pMultiSourceFrameReader->AcquireLatestFrame(&pMultiSourceFrame);
if (SUCCEEDED(hr))
{
IDepthFrameReference* pDepthFrameReference = NULL;
hr = pMultiSourceFrame->get_DepthFrameReference(&pDepthFrameReference);
if (SUCCEEDED(hr))
{
hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);
}
SafeRelease(pDepthFrameReference);
}
if (SUCCEEDED(hr))
{
IColorFrameReference* pColorFrameReference = NULL;
hr = pMultiSourceFrame->get_ColorFrameReference(&pColorFrameReference);
if (SUCCEEDED(hr))
{
hr = pColorFrameReference->AcquireFrame(&pColorFrame);
}
SafeRelease(pColorFrameReference);
}
if (SUCCEEDED(hr))
{
IBodyIndexFrameReference* pBodyIndexFrameReference = NULL;
hr = pMultiSourceFrame->get_BodyIndexFrameReference(&pBodyIndexFrameReference);
if (SUCCEEDED(hr))
{
hr = pBodyIndexFrameReference->AcquireFrame(&pBodyIndexFrame);
}
SafeRelease(pBodyIndexFrameReference);
}
if (SUCCEEDED(hr))
{
IBodyFrameReference* pBodyFrameReference = NULL;
hr = pMultiSourceFrame->get_BodyFrameReference(&pBodyFrameReference);
if (SUCCEEDED(hr))
{
hr = pBodyFrameReference->AcquireFrame(&pBodyFrame);
}
SafeRelease(pBodyFrameReference);
}
if (SUCCEEDED(hr))
{
// Depth
INT64 nDepthTime = 0;
IFrameDescription* pDepthFrameDescription = NULL;
int nDepthWidth = 0;
int nDepthHeight = 0;
UINT nDepthBufferSize = 0;
UINT16 *pDepthBuffer = NULL;
// Color
IFrameDescription* pColorFrameDescription = NULL;
int nColorWidth = 0;
int nColorHeight = 0;
ColorImageFormat imageFormat = ColorImageFormat_None;
UINT nColorBufferSize = 0;
RGBQUAD *pColorBuffer = NULL;
// BodyIndex
IFrameDescription* pBodyIndexFrameDescription = NULL;
int nBodyIndexWidth = 0;
int nBodyIndexHeight = 0;
UINT nBodyIndexBufferSize = 0;
BYTE *pBodyIndexBuffer = NULL;
// Body
IBody* ppBodies[BODY_COUNT] = { 0 };
// get depth frame data
//......... (remaining code omitted) .........