This article collects typical usage examples of the C++ method IDepthFrame::AccessUnderlyingBuffer. If you have been struggling with questions such as how exactly to call IDepthFrame::AccessUnderlyingBuffer in C++, or what real code that uses it looks like, the hand-picked examples here should help. You can also browse further usage examples for the containing class, IDepthFrame.
Fifteen code examples of IDepthFrame::AccessUnderlyingBuffer are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
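Before the individual examples, here is a minimal, self-contained sketch of the canonical call sequence (an illustration only: error handling is abbreviated, and the sensor/reader setup follows the standard Kinect for Windows v2 SDK pattern). The point every example below relies on is that AccessUnderlyingBuffer returns a pointer into memory owned by the frame, so the data must be consumed before the frame is released.

#include <Kinect.h>
#include <iostream>

int main()
{
    IKinectSensor* sensor = nullptr;
    IDepthFrameSource* source = nullptr;
    IDepthFrameReader* reader = nullptr;
    if (FAILED(GetDefaultKinectSensor(&sensor)) || FAILED(sensor->Open()) ||
        FAILED(sensor->get_DepthFrameSource(&source)) || FAILED(source->OpenReader(&reader)))
        return -1;

    IDepthFrame* frame = nullptr;
    // AcquireLatestFrame fails (E_PENDING) until a new frame is available
    if (SUCCEEDED(reader->AcquireLatestFrame(&frame)))
    {
        UINT size = 0;            // reported in UINT16 elements, not bytes
        UINT16* buffer = nullptr; // points into frame-owned memory, no copy is made
        if (SUCCEEDED(frame->AccessUnderlyingBuffer(&size, &buffer)))
        {
            std::cout << "first depth sample: " << buffer[0] << " mm" << std::endl;
        }
        frame->Release();         // invalidates `buffer`
    }

    reader->Release();
    source->Release();
    sensor->Close();
    sensor->Release();
    return 0;
}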
Example 1: glBindTexture
// Retrieve the depth data
GLuint KinectV2::getDepth() const
{
  // Bind the depth texture
  glBindTexture(GL_TEXTURE_2D, depthTexture);

  // If the next depth frame has arrived
  IDepthFrame *depthFrame;
  if (depthReader->AcquireLatestFrame(&depthFrame) == S_OK)
  {
    // Get the size and location of the depth data
    UINT depthSize;
    UINT16 *depthBuffer;
    depthFrame->AccessUnderlyingBuffer(&depthSize, &depthBuffer);

    // Compute texture coordinates into the color image and transfer them to the buffer object
    glBindBuffer(GL_ARRAY_BUFFER, coordBuffer);
    ColorSpacePoint *const texcoord(static_cast<ColorSpacePoint *>(glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY)));
    coordinateMapper->MapDepthFrameToColorSpace(depthCount, depthBuffer, depthCount, texcoord);
    glUnmapBuffer(GL_ARRAY_BUFFER);

    // Transfer the depth data to the texture
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, depthWidth, depthHeight, GL_RED, GL_UNSIGNED_SHORT, depthBuffer);

    // Release the depth frame
    depthFrame->Release();
  }

  return depthTexture;
}
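A note on this pattern: AccessUnderlyingBuffer is the zero-copy counterpart of IDepthFrame::CopyFrameDataToArray (which Example 4 below uses as well). The trade-off, sketched here with a hypothetical caller-owned array, is lifetime versus speed:

UINT size = 0;
UINT16* buffer = nullptr;
// Zero-copy: `buffer` aliases the frame's internal storage and dies with Release()
depthFrame->AccessUnderlyingBuffer(&size, &buffer);

// Copying alternative: `myDepthArray` (hypothetical, caller-owned) survives Release()
UINT16 myDepthArray[512 * 424];
depthFrame->CopyFrameDataToArray(512 * 424, myDepthArray);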
Example 2: z
// Retrieve camera-space coordinates
GLuint KinectV2::getPoint() const
{
  // Bind the camera-coordinate texture
  glBindTexture(GL_TEXTURE_2D, pointTexture);

  // If the next depth frame has arrived
  IDepthFrame *depthFrame;
  if (depthReader->AcquireLatestFrame(&depthFrame) == S_OK)
  {
    // Get the size and location of the depth data
    UINT depthSize;
    UINT16 *depthBuffer;
    depthFrame->AccessUnderlyingBuffer(&depthSize, &depthBuffer);

    // Compute texture coordinates into the color image and transfer them to the buffer object
    glBindBuffer(GL_ARRAY_BUFFER, coordBuffer);
    ColorSpacePoint *const texcoord(static_cast<ColorSpacePoint *>(glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY)));
    coordinateMapper->MapDepthFrameToColorSpace(depthCount, depthBuffer, depthCount, texcoord);
    glUnmapBuffer(GL_ARRAY_BUFFER);

    // Get the depth-to-camera-space conversion table
    // (each entry holds the camera-space X and Y at unit depth, so scaling by z recovers metric coordinates)
    UINT32 entry;
    PointF *table;
    coordinateMapper->GetDepthFrameToCameraSpaceTable(&entry, &table);

    // For every point
    for (unsigned int i = 0; i < entry; ++i)
    {
      // Scale factor that converts depth values to meters
      static const GLfloat zScale(-0.001f);

      // Get this point's depth value
      const unsigned short d(depthBuffer[i]);

      // Convert the depth value to meters (unmeasurable points are set to maxDepth)
      const GLfloat z(d == 0 ? -maxDepth : GLfloat(d) * zScale);

      // Get this point's position on the screen
      const GLfloat x(table[i].X);
      const GLfloat y(-table[i].Y);

      // Compute this point's camera-space coordinates
      position[i][0] = x * z;
      position[i][1] = y * z;
      position[i][2] = z;
    }

    // Transfer the camera-space coordinates
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, depthWidth, depthHeight, GL_RGB, GL_FLOAT, position);

    // Free the table
    CoTaskMemFree(table);

    // Release the depth frame
    depthFrame->Release();
  }

  return pointTexture;
}
Example 3: update
cv::Mat capKinect::update(cv::Mat& depth_show)
{
    if (!m_pDepthReader) return cv::Mat();

    IDepthFrame* pDepthFrame = NULL;
    HRESULT hr = m_pDepthReader->AcquireLatestFrame(&pDepthFrame);
    cv::Mat re;
    if (SUCCEEDED(hr))
    {
        IFrameDescription* pFrameDescription = NULL;
        int nWidth = 0;
        int nHeight = 0;
        USHORT nDepthMinReliableDistance = 0;
        USHORT nDepthMaxDistance = 0;
        UINT nBufferSize = 0;
        UINT16 *pBuffer = NULL;

        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
        }
        if (SUCCEEDED(hr))
        {
            hr = pFrameDescription->get_Width(&nWidth);
        }
        if (SUCCEEDED(hr))
        {
            hr = pFrameDescription->get_Height(&nHeight);
        }
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
        }
        if (SUCCEEDED(hr))
        {
            // In order to see the full range of depth (including the less reliable far field depth)
            // we are setting nDepthMaxDistance to the extreme potential depth threshold
            nDepthMaxDistance = USHRT_MAX;
            // Note: If you wish to filter by reliable depth distance, uncomment the following line.
            //// hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxDistance);
        }
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
        }
        if (SUCCEEDED(hr))
        {
            re = capture(pBuffer, nWidth, nHeight, depth_show, nDepthMinReliableDistance, nDepthMaxDistance);
        }
        if (pFrameDescription) SafeRelease(pFrameDescription);
    }
    if (pDepthFrame) SafeRelease(pDepthFrame);
    return re;
}
Example 4: getFrameData
bool KinectInterface::getFrameData(IMultiSourceFrame* frame, cv::Mat& intensity_mat, cv::Mat& depth_mat, cv::Mat& pos_mat) {
    // Obtain depth frame
    IDepthFrame* depthframe = nullptr;
    if (FAILED(depthFrameReader->AcquireLatestFrame(&depthframe))) return false;
    if (!depthframe) return false;

    // Get data from frame
    unsigned int sz;
    unsigned short* buf;
    if (FAILED(depthframe->AccessUnderlyingBuffer(&sz, &buf))) return false;
    // Get depth -> xyz mapping
    if (FAILED(mapper->MapDepthFrameToCameraSpace(width*height, buf, width*height, depth2xyz))) return false;
    // Get depth -> rgb image mapping
    if (FAILED(mapper->MapDepthFrameToColorSpace(width*height, buf, width*height, depth2rgb))) return false;
    // Save depth
    if (FAILED(depthframe->CopyFrameDataToArray(height * width, depth_data))) return false;
    if (depthframe) depthframe->Release();

    // Obtain RGB frame
    IColorFrame* colorframe;
    if (FAILED(colorFrameReader->AcquireLatestFrame(&colorframe))) return false;
    if (!colorframe) return false;

    // Get data from frame
    if (FAILED(colorframe->CopyConvertedFrameDataToArray(colorwidth*colorheight * 4, rgbimage, ColorImageFormat_Rgba))) return false;

    cv::Mat tmp_depth = cv::Mat::zeros(colorheight, colorwidth, CV_16UC1);
    cv::Mat tmp_pos = cv::Mat::zeros(colorheight, colorwidth, CV_32FC3);
    cv::Mat depth_org(height, width, CV_16UC1, depth_data);
    cv::Mat tmp_rgb(colorheight, colorwidth, CV_8UC4, rgbimage);

    // Write color array for vertices
    for (int i = 0; i < width*height; i++) {
        ColorSpacePoint p = depth2rgb[i];
        int iY = (int)(p.Y + 0.5);
        int iX = (int)(p.X + 0.5);
        // Check if color pixel coordinates are in bounds
        if (iX >= 0 && iY >= 0 && iX < colorwidth && iY < colorheight) {
            tmp_depth.at<unsigned short>(iY, iX) = depth_data[i];
            //tmp_pos.at<float>(iY, iX, 0) = depth2xyz[i].X;
            //tmp_pos.at<float>(iY, iX, 1) = depth2xyz[i].Y;
            //tmp_pos.at<float>(iY, iX, 2) = depth2xyz[i].Z;
        }
    }
    if (colorframe) colorframe->Release();

    cv::resize(tmp_rgb(cv::Rect(240, 0, 1440, 1080)), intensity_mat, cv::Size(640, 480));
    cv::resize(tmp_depth(cv::Rect(240, 0, 1440, 1080)), depth_mat, cv::Size(640, 480));
    cv::resize(tmp_pos(cv::Rect(240, 0, 1440, 1080)), pos_mat, cv::Size(640, 480));
    cv::cvtColor(intensity_mat, intensity_mat, CV_RGBA2GRAY);
    return true;
}
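One caveat worth flagging in Example 4: the frames are released only on the success path, so each early return false leaks the acquired IDepthFrame or IColorFrame. A small RAII guard, shown below as a hypothetical helper rather than anything from the original codebase, releases the frame on every path (Microsoft::WRL::ComPtr from <wrl/client.h> serves the same purpose):

// Hypothetical scope guard for COM-style interfaces; sketch only.
template <typename T>
struct FrameGuard {
    T* p = nullptr;
    ~FrameGuard() { if (p) p->Release(); }
    T** operator&() { return &p; }      // lets Acquire*Frame(&guard) fill it in
    T* operator->() const { return p; }
    explicit operator bool() const { return p != nullptr; }
};

// Usage in a function like getFrameData: every return path now releases the frame.
// FrameGuard<IDepthFrame> depthframe;
// if (FAILED(depthFrameReader->AcquireLatestFrame(&depthframe))) return false;
// if (!depthframe) return false;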
Example 5: Mat
void Microsoft2Grabber::DepthFrameArrived(IDepthFrameReference* pDepthFrameReference) {
    IDepthFrame* pDepthFrame = NULL;
    HRESULT hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);
    //HRESULT hr = pDepthFrameReference->AcquireLatestFrame(&pDepthFrame);
    if (FAILED(hr))
        return;
    //cout << "got a depth frame" << endl;
    INT64 nDepthTime = 0;
    IFrameDescription* pDepthFrameDescription = NULL;
    int nDepthWidth = 0;
    int nDepthHeight = 0;
    UINT nDepthBufferSize = 0;

    // get depth frame data
    hr = pDepthFrame->get_RelativeTime(&nDepthTime);
    if (SUCCEEDED(hr)) {
        hr = pDepthFrame->get_FrameDescription(&pDepthFrameDescription);
    }
    if (SUCCEEDED(hr)) {
        hr = pDepthFrameDescription->get_Width(&nDepthWidth);
    }
    if (SUCCEEDED(hr)) {
        hr = pDepthFrameDescription->get_Height(&nDepthHeight);
    }
    if (SUCCEEDED(hr)) {
        hr = pDepthFrame->AccessUnderlyingBuffer(&nDepthBufferSize, &m_pDepthBuffer);
        //WaitForSingleObject(hDepthMutex,INFINITE);
        // Wrap the frame's buffer without copying, then clone it so the data outlives
        // the frame (the clone is kept in a named lvalue to avoid casting the address of a temporary)
        Mat tmp = Mat(m_depthSize, DEPTH_PIXEL_TYPE, m_pDepthBuffer, Mat::AUTO_STEP);
        Mat cloned = tmp.clone();
        MatDepth depth_img = *((MatDepth*)&cloned);
        m_depthTime = nDepthTime;
        if (depth_image_signal_->num_slots() > 0) {
            depth_image_signal_->operator()(depth_img);
        }
        if (num_slots<sig_cb_microsoft_point_cloud_rgba>() > 0 || all_data_signal_->num_slots() > 0 || image_depth_image_signal_->num_slots() > 0) {
            //rgb_sync_.add1 (depth_img, m_depthTime);
            imageDepthOnlyImageCallback(depth_img);
        }
        //ReleaseMutex(hDepthMutex);
    }
    SafeRelease(pDepthFrameDescription);
    SafeRelease(pDepthFrame);
}
Example 6: getDepthData
void MKinect::getDepthData(IMultiSourceFrame* frame, float* dest) {
    IDepthFrame* depthframe;
    IDepthFrameReference* frameref = NULL;
    frame->get_DepthFrameReference(&frameref);
    frameref->AcquireFrame(&depthframe);
    if (frameref) frameref->Release();
    if (!depthframe) return;

    // Get data from frame
    unsigned int sz;
    UINT16 * buf;
    // Spin until the underlying buffer becomes available
    while (!SUCCEEDED(depthframe->AccessUnderlyingBuffer(&sz, &buf))) {
    }
    HRESULT res = S_OK;
    // Caution: the original passes the color resolution here, although
    // MapDepthFrameToCameraSpace expects the depth frame's pixel count as its input size
    res = mapper->MapDepthFrameToCameraSpace(
        KinectColorWidth*KinectColorHeight, buf,        // Depth frame data and size of depth frame
        KinectColorWidth*KinectColorHeight, depth2xyz); // Output CameraSpacePoint array and size
    // Process depth frame data...
    if (depthframe) depthframe->Release();
}
Example 7: main
//......... part of the code omitted here .........
}
    cv::Mat coordinateMapperMat(depthHeight, depthWidth, CV_8UC4);
    cv::namedWindow("CoordinateMapper");

    unsigned short minDepth, maxDepth;
    pDepthSource->get_DepthMinReliableDistance(&minDepth);
    pDepthSource->get_DepthMaxReliableDistance(&maxDepth);

    while (1) {
        double t = (double)getTickCount();

        // Color Frame
        IColorFrame* pColorFrame = nullptr;
        hResult = pColorReader->AcquireLatestFrame(&pColorFrame);
        if (SUCCEEDED(hResult)) {
            hResult = pColorFrame->CopyConvertedFrameDataToArray(colorBufferSize, reinterpret_cast<BYTE*>(colorBufferMat.data), ColorImageFormat::ColorImageFormat_Bgra);
            if (SUCCEEDED(hResult)) {
                cv::resize(colorBufferMat, colorMat, cv::Size(), 0.5, 0.5);
            }
        }
        //SafeRelease( pColorFrame );

        // Depth Frame
        IDepthFrame* pDepthFrame = nullptr;
        hResult = pDepthReader->AcquireLatestFrame(&pDepthFrame);
        if (SUCCEEDED(hResult)) {
            // First expose the buffer through the cv::Mat wrapper...
            hResult = pDepthFrame->AccessUnderlyingBuffer(&depthBufferSize, reinterpret_cast<UINT16**>(&depthBufferMat.data));
        }
        if (SUCCEEDED(hResult)) {
            // ...then fetch the same buffer again as a raw pointer
            hResult = pDepthFrame->AccessUnderlyingBuffer(&depthBufferSize, &pDepthBuffer);
            if (SUCCEEDED(hResult))
            {
                RGBQUAD* pRGBX = m_pDepthRGBX;

                // end pixel is start + width*height - 1
                const UINT16* pBufferEnd = pDepthBuffer + (512 * 424);
                int index = 0;

                while (pDepthBuffer < pBufferEnd)
                {
                    USHORT depth = *pDepthBuffer;
                    BYTE intensity = static_cast<BYTE>((depth >= 50) && (depth <= 5000) ? (depth % 256) : 0);
                    pRGBX->rgbRed = intensity;
                    pRGBX->rgbGreen = intensity;
                    pRGBX->rgbBlue = intensity;
                    depthData[index] = depth;
                    ++index;
                    ++pRGBX;
                    ++pDepthBuffer;
                }
            }
        }
Example 8: _tmain
//......... part of the code omitted here .........
    // Point Cloud
    NUI_FUSION_IMAGE_FRAME* pPointCloudImageFrame;
    hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_POINT_CLOUD, width, height, nullptr, &pPointCloudImageFrame );
    if( FAILED( hResult ) ){
        std::cerr << "Error : NuiFusionCreateImageFrame( POINT_CLOUD )" << std::endl;
        return -1;
    }

    // Surface
    NUI_FUSION_IMAGE_FRAME* pSurfaceImageFrame;
    hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_COLOR, width, height, nullptr, &pSurfaceImageFrame );
    if( FAILED( hResult ) ){
        std::cerr << "Error : NuiFusionCreateImageFrame( COLOR )" << std::endl;
        return -1;
    }

    // Normal
    NUI_FUSION_IMAGE_FRAME* pNormalImageFrame;
    hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_COLOR, width, height, nullptr, &pNormalImageFrame );
    if( FAILED( hResult ) ){
        std::cerr << "Error : NuiFusionCreateImageFrame( COLOR )" << std::endl;
        return -1;
    }

    cv::namedWindow( "Surface" );
    cv::namedWindow( "Normal" );

    while( 1 ){
        // Frame
        IDepthFrame* pDepthFrame = nullptr;
        hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame );
        if( SUCCEEDED( hResult ) ){
            hResult = pDepthFrame->AccessUnderlyingBuffer( &bufferSize, reinterpret_cast<UINT16**>( &bufferMat.data ) );
            if( SUCCEEDED( hResult ) ){
                bufferMat.convertTo( depthMat, CV_8U, -255.0f / 8000.0f, 255.0f );
                hResult = pReconstruction->DepthToDepthFloatFrame( reinterpret_cast<UINT16*>( bufferMat.data ), width * height * sizeof( UINT16 ), pDepthFloatImageFrame, NUI_FUSION_DEFAULT_MINIMUM_DEPTH/* 0.5[m] */, NUI_FUSION_DEFAULT_MAXIMUM_DEPTH/* 8.0[m] */, true );
                if( FAILED( hResult ) ){
                    std::cerr << "Error : INuiFusionReconstruction::DepthToDepthFloatFrame()" << std::endl;
                    return -1;
                }
            }
        }
        SafeRelease( pDepthFrame );

        // Smoothing Depth Image Frame
        hResult = pReconstruction->SmoothDepthFloatFrame( pDepthFloatImageFrame, pSmoothDepthFloatImageFrame, 1, 0.04f );
        if( FAILED( hResult ) ){
            std::cerr << "Error : INuiFusionReconstruction::SmoothDepthFloatFrame" << std::endl;
            return -1;
        }

        // Reconstruction Process
        pReconstruction->GetCurrentWorldToCameraTransform( &worldToCameraTransform );
        hResult = pReconstruction->ProcessFrame( pSmoothDepthFloatImageFrame, NUI_FUSION_DEFAULT_ALIGN_ITERATION_COUNT, NUI_FUSION_DEFAULT_INTEGRATION_WEIGHT, nullptr, &worldToCameraTransform );
        if( FAILED( hResult ) ){
            static int errorCount = 0;
            errorCount++;
            if( errorCount >= 100 ) {
                errorCount = 0;
                ResetReconstruction( pReconstruction, &worldToCameraTransform );
            }
        }

        // Calculate Point Cloud
        hResult = pReconstruction->CalculatePointCloud( pPointCloudImageFrame, &worldToCameraTransform );
Example 9: capture
void capture(Image::Ptr& pImage)
{
    HRESULT hr;

    if (m_pMultiSourceFrameReader == nullptr)
    {
        camera->getContext().error("CameraKinectDevice::capture: m_pMultiSourceFrameReader is nullptr\n");
        // this is bad news - perhaps throw?
        return; // @@@
    }

    IMultiSourceFrame* pMultiSourceFrame = nullptr;
    IDepthFrame* pDepthFrame = nullptr;
    IColorFrame* pColorFrame = nullptr;

    const golem::MSecTmU32 waitStep = 1;
    golem::MSecTmU32 timeWaited = 0;
    golem::Sleep timer;
    while (FAILED(hr = m_pMultiSourceFrameReader->AcquireLatestFrame(&pMultiSourceFrame)))
    {
        // this is in CameraOpenNI, but we suspect it may be causing a problem here
        // if (camera->isTerminating()) return;
        timer.msleep(waitStep);
        timeWaited += waitStep;
        if (timeWaited >= timeout)
        {
            camera->getContext().error("CameraKinectDevice::capture: failed to acquire frame within %d ms\n", timeout);
            // keep going - don't return with nothing; reset stopwatch @@@
            timeWaited = 0;
        }
    }
    const golem::SecTmReal systemTime1 = camera->getContext().getTimer().elapsed();

    if (SUCCEEDED(hr))
    {
        IDepthFrameReference* pDepthFrameReference = nullptr;
        hr = pMultiSourceFrame->get_DepthFrameReference(&pDepthFrameReference);
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrameReference->AcquireFrame(&pDepthFrame);
        }
        RELEASE_PTR(pDepthFrameReference);
    }

    if (SUCCEEDED(hr))
    {
        IColorFrameReference* pColorFrameReference = nullptr;
        hr = pMultiSourceFrame->get_ColorFrameReference(&pColorFrameReference);
        if (SUCCEEDED(hr))
        {
            hr = pColorFrameReference->AcquireFrame(&pColorFrame);
        }
        RELEASE_PTR(pColorFrameReference);
    }

    if (SUCCEEDED(hr))
    {
        INT64 nDepthTime = 0;
        IFrameDescription* pDepthFrameDescription = nullptr;
        int nDepthWidth = 0;
        int nDepthHeight = 0;
        UINT nDepthBufferSize = 0;
        UINT16 *pDepthBuffer = nullptr;

        IFrameDescription* pColorFrameDescription = nullptr;
        int nColorWidth = 0;
        int nColorHeight = 0;
        ColorImageFormat imageFormat = ColorImageFormat_None;
        UINT nColorBufferSize = 0;
        RGBQUAD *pColorBuffer = nullptr;

        // get depth frame data
        hr = pDepthFrame->get_RelativeTime(&nDepthTime);
        if (SUCCEEDED(hr))
            hr = pDepthFrame->get_FrameDescription(&pDepthFrameDescription);
        if (SUCCEEDED(hr))
            hr = pDepthFrameDescription->get_Width(&nDepthWidth);
        if (SUCCEEDED(hr))
            hr = pDepthFrameDescription->get_Height(&nDepthHeight);
        if (SUCCEEDED(hr))
            hr = pDepthFrame->AccessUnderlyingBuffer(&nDepthBufferSize, &pDepthBuffer);

        // get color frame data
        if (SUCCEEDED(hr))
            hr = pColorFrame->get_FrameDescription(&pColorFrameDescription);
        if (SUCCEEDED(hr))
            hr = pColorFrameDescription->get_Width(&nColorWidth);
        if (SUCCEEDED(hr))
//......... part of the code omitted here .........
Example 10: update
//......... part of the code omitted here .........
                        kinectBody->get_TrackingId( &id );
                        std::map<JointType, Body::Joint> jointMap;
                        for ( int32_t j = 0; j < JointType_Count; ++j ) {
                            Body::Joint joint(
                                toVec3f( joints[ j ].Position ),
                                toQuatf( jointOrientations[ j ].Orientation ),
                                joints[ j ].TrackingState
                                );
                            jointMap.insert( pair<JointType, Body::Joint>( static_cast<JointType>( j ), joint ) );
                        }
                        Body body( id, i, jointMap );
                        bodies.push_back( body );
                    }
                }
            }
        }
    }

    if ( mDeviceOptions.isBodyIndexEnabled() ) {
        if ( SUCCEEDED( hr ) ) {
            hr = bodyIndexFrame->get_RelativeTime( &bodyIndexTime );
        }
        if ( SUCCEEDED( hr ) ) {
            hr = bodyIndexFrame->get_FrameDescription( &bodyIndexFrameDescription );
        }
        if ( SUCCEEDED( hr ) ) {
            hr = bodyIndexFrameDescription->get_Width( &bodyIndexWidth );
        }
        if ( SUCCEEDED( hr ) ) {
            hr = bodyIndexFrameDescription->get_Height( &bodyIndexHeight );
        }
        if ( SUCCEEDED( hr ) ) {
            hr = bodyIndexFrame->AccessUnderlyingBuffer( &bodyIndexBufferSize, &bodyIndexBuffer );
        }
        if ( SUCCEEDED( hr ) ) {
            bodyIndexChannel = Channel8u( bodyIndexWidth, bodyIndexHeight );
            memcpy( bodyIndexChannel.getData(), bodyIndexBuffer, bodyIndexWidth * bodyIndexHeight * sizeof( uint8_t ) );
        }
    }

    if ( mDeviceOptions.isColorEnabled() ) {
        if ( SUCCEEDED( hr ) ) {
            hr = colorFrame->get_FrameDescription( &colorFrameDescription );
            if ( SUCCEEDED( hr ) ) {
                float vFov = 0.0f;
                float hFov = 0.0f;
                float dFov = 0.0f;
                colorFrameDescription->get_VerticalFieldOfView( &vFov );
                colorFrameDescription->get_HorizontalFieldOfView( &hFov );
                colorFrameDescription->get_DiagonalFieldOfView( &dFov );
            }
        }
        if ( SUCCEEDED( hr ) ) {
            hr = colorFrameDescription->get_Width( &colorWidth );
        }
        if ( SUCCEEDED( hr ) ) {
            hr = colorFrameDescription->get_Height( &colorHeight );
        }
        if ( SUCCEEDED( hr ) ) {
            hr = colorFrame->get_RawColorImageFormat( &colorImageFormat );
        }
        if ( SUCCEEDED( hr ) ) {
            colorBufferSize = colorWidth * colorHeight * sizeof( uint8_t ) * 4;
            colorBuffer = new uint8_t[ colorBufferSize ];
            hr = colorFrame->CopyConvertedFrameDataToArray( colorBufferSize, reinterpret_cast<uint8_t*>( colorBuffer ), ColorImageFormat_Rgba );
Example 11: Update
//......... part of the code omitted here .........
    RGBQUAD *pColorBuffer = NULL;

    // BodyIndex
    IFrameDescription* pBodyIndexFrameDescription = NULL;
    int nBodyIndexWidth = 0;
    int nBodyIndexHeight = 0;
    UINT nBodyIndexBufferSize = 0;
    BYTE *pBodyIndexBuffer = NULL;

    // Body
    IBody* ppBodies[BODY_COUNT] = { 0 };

    // get depth frame data
    hr = pDepthFrame->get_RelativeTime(&nDepthTime);

    // Depth
    if (SUCCEEDED(hr))
    {
        hr = pDepthFrame->get_FrameDescription(&pDepthFrameDescription);
    }
    if (SUCCEEDED(hr))
    {
        hr = pDepthFrameDescription->get_Width(&nDepthWidth);
    }
    if (SUCCEEDED(hr))
    {
        hr = pDepthFrameDescription->get_Height(&nDepthHeight);
    }
    if (SUCCEEDED(hr))
    {
        hr = pDepthFrame->AccessUnderlyingBuffer(&nDepthBufferSize, &pDepthBuffer);
    }

    // get color frame data
    if (SUCCEEDED(hr))
    {
        hr = pColorFrame->get_FrameDescription(&pColorFrameDescription);
    }
    if (SUCCEEDED(hr))
    {
        hr = pColorFrameDescription->get_Width(&nColorWidth);
    }
    if (SUCCEEDED(hr))
    {
        hr = pColorFrameDescription->get_Height(&nColorHeight);
    }
    if (SUCCEEDED(hr))
    {
        hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
    }
    if (SUCCEEDED(hr))
    {
        if (imageFormat == ColorImageFormat_Bgra)
        {
            hr = pColorFrame->AccessRawUnderlyingBuffer(&nColorBufferSize, reinterpret_cast<BYTE**>(&pColorBuffer));
        }
        else if (m_pColorRGBX)
        {
            pColorBuffer = m_pColorRGBX;
Example 12: main
//......... part of the code omitted here .........
            cv::line(display,Point(displaySize*w/2,0),Point(displaySize*w/2,displaySize*h),Scalar(0,0,255),2);
            cv::line(display,Point(0,displaySize*h/2),Point(displaySize*w,displaySize*h/2),Scalar(0,0,255),2);
            if (pColorFrame)
            {
                pColorFrame->Release();
                pColorFrame = NULL;
            }
        }
        else
            return false;
    }
    else
    {
        IDepthFrame* pDepthFrame = nullptr;
        hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame );
        while(!SUCCEEDED(hResult)){
            Sleep(10);
            hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame );
        }
        if( SUCCEEDED( hResult ) ){
            unsigned int bufferSize = 0;
            unsigned short* buffer = nullptr;
            hResult = pDepthFrame->AccessUnderlyingBuffer( &bufferSize, &buffer );
            if( SUCCEEDED( hResult ) ){
                for( int y = 0; y < h; y++ ){
                    for( int x = 0; x < w; x++ ){
                        Vec3b intensity = frame.at<Vec3b>(y, x);
                        if(buffer[ y * w + (w - x - 1) ] < hauteurCamera){
                            int d = buffer[ y * w + (w - x - 1) ];
                            intensity.val[0] = 2.55*(d % 100);
                            intensity.val[1] = 1.22*(d % 200);
                            intensity.val[2] = 256.0*d/hauteurCamera;
                        }
                        else
                        {
                            intensity.val[0] = 255;
                            intensity.val[1] = 255;
                            intensity.val[2] = 255;
                        }
                        /*intensity.val[0] = buffer[ y * w + x ] >> 8;
                        intensity.val[1] = buffer[ y * w + x ] >> 8;
                        intensity.val[2] = buffer[ y * w + x ] >> 8;*/
                        frame.at<Vec3b>(y, x) = intensity;
                    }
                }
                // change the rectangle's color depending on whether the corner heights are similar or not (within 4 cm)
                float d1 = buffer[(int)(fountainYPosition-fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition-fountainWidth/2.0))];
                float d2 = buffer[(int)(fountainYPosition-fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition+fountainWidth/2.0))];
                float d3 = buffer[(int)(fountainYPosition+fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition-fountainWidth/2.0))];
                float d4 = buffer[(int)(fountainYPosition+fountainWidth/2.0)*w + (w-1-(int)(fountainXPosition+fountainWidth/2.0))];
                if((d1 < 0)||(d1>3500)||(d2 < 0)||(d2>3500)||(d3 < 0)||(d3>3500)||(d4 < 0)||(d4>3500)){
Example 13: update
//......... part of the code omitted here .........
                colorFrame->get_RelativeTime( &time );
                Surface8u colorSurface = Surface8u( colorBuffer, colorWidth, colorHeight, colorWidth * sizeof( uint8_t ) * 4, channelOrder );
                mFrame.mSurfaceColor = Surface8u( colorWidth, colorHeight, false, channelOrder );
                mFrame.mSurfaceColor.copyFrom( colorSurface, colorSurface.getBounds() );
                console() << "Color\n\twidth: " << colorWidth << "\n\theight: " << colorHeight
                    << "\n\tbuffer size: " << colorBufferSize << "\n\ttime: " << time << endl;
            }
            if ( isAllocated && colorBuffer != 0 ) {
                delete[] colorBuffer;
                colorBuffer = 0;
            }
        }
    }

    if ( mDeviceOptions.isDepthEnabled() ) {
        if ( SUCCEEDED( hr ) ) {
            hr = depthFrame->get_FrameDescription( &depthFrameDescription );
        }
        if ( SUCCEEDED( hr ) ) {
            hr = depthFrameDescription->get_Width( &depthWidth );
        }
        if ( SUCCEEDED( hr ) ) {
            hr = depthFrameDescription->get_Height( &depthHeight );
        }
        if ( SUCCEEDED( hr ) ) {
            hr = depthFrame->get_DepthMinReliableDistance( &depthMinReliableDistance );
        }
        if ( SUCCEEDED( hr ) ) {
            hr = depthFrame->get_DepthMaxReliableDistance( &depthMaxReliableDistance );
        }
        if ( SUCCEEDED( hr ) ) {
            hr = depthFrame->AccessUnderlyingBuffer( &depthBufferSize, &depthBuffer );
        }
        if ( SUCCEEDED( hr ) ) {
            Channel16u depthChannel = Channel16u( depthWidth, depthHeight, depthWidth * sizeof( uint16_t ), 1, depthBuffer );
            mFrame.mChannelDepth = Channel16u( depthWidth, depthHeight );
            mFrame.mChannelDepth.copyFrom( depthChannel, depthChannel.getBounds() );
            console() << "Depth\n\twidth: " << depthWidth << "\n\theight: " << depthHeight << endl;
        }
    }

    if ( mDeviceOptions.isInfraredEnabled() ) {
        if ( SUCCEEDED( hr ) ) {
            hr = infraredFrame->get_FrameDescription( &infraredFrameDescription );
        }
        if ( SUCCEEDED( hr ) ) {
            hr = infraredFrameDescription->get_Width( &infraredWidth );
        }
        if ( SUCCEEDED( hr ) ) {
            hr = infraredFrameDescription->get_Height( &infraredHeight );
        }
        if ( SUCCEEDED( hr ) ) {
            hr = infraredFrame->AccessUnderlyingBuffer( &infraredBufferSize, &infraredBuffer );
        }
        if ( SUCCEEDED( hr ) ) {
            Channel16u infraredChannel = Channel16u( infraredWidth, infraredHeight, infraredWidth * sizeof( uint16_t ), 1, infraredBuffer );
            mFrame.mChannelInfrared = Channel16u( infraredWidth, infraredHeight );
            mFrame.mChannelInfrared.copyFrom( infraredChannel, infraredChannel.getBounds() );
            console() << "Infrared\n\twidth: " << infraredWidth << "\n\theight: " << infraredHeight << endl;
        }
    }
Example 14: if
// Hao modified it
void Kinect2Engine::getImages(ITMUChar4Image *rgbImage, ITMShortImage *rawDepthImage)
{
    Vector4u *rgb = rgbImage->GetData(MEMORYDEVICE_CPU);
    if (colorAvailable)
    {
        IColorFrame* pColorFrame = NULL;
        ColorImageFormat imageFormat = ColorImageFormat_None;
        UINT nBufferSize = 0;
        RGBQUAD *c_pBuffer = NULL;

        HRESULT hr = data->colorFrameReader->AcquireLatestFrame(&pColorFrame);
        if (SUCCEEDED(hr))
        {
            if (SUCCEEDED(hr))
                hr = pColorFrame->get_RawColorImageFormat(&imageFormat);

            if (SUCCEEDED(hr))
            {
                if (imageFormat == ColorImageFormat_Bgra)
                {
                    hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&c_pBuffer));
                }
                else if (m_pColorRGBX)
                {
                    c_pBuffer = m_pColorRGBX;
                    nBufferSize = imageSize_rgb.x * imageSize_rgb.y * sizeof(RGBQUAD);
                    hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(c_pBuffer), ColorImageFormat_Bgra);
                }
                else
                {
                    hr = E_FAIL;
                }
            }

            if (SUCCEEDED(hr) && c_pBuffer)
            {
                for (int i = 0; i < imageSize_rgb.x * imageSize_rgb.y; i++)
                {
                    Vector4u newPix;
                    RGBQUAD oldPix = c_pBuffer[i];
                    newPix.x = oldPix.rgbRed;
                    newPix.y = oldPix.rgbGreen;
                    newPix.z = oldPix.rgbBlue;
                    newPix.w = 255;
                    rgb[i] = newPix;
                }
            }
        }
        SafeRelease(pColorFrame);
    }
    else memset(rgb, 0, rgbImage->dataSize * sizeof(Vector4u));

    short *depth = rawDepthImage->GetData(MEMORYDEVICE_CPU);
    if (depthAvailable)
    {
        IDepthFrame* pDepthFrame = NULL;
        UINT16 *d_pBuffer = NULL;
        UINT nBufferSize = 0;

        HRESULT hr = data->depthFrameReader->AcquireLatestFrame(&pDepthFrame);
        if (SUCCEEDED(hr))
        {
            if (SUCCEEDED(hr))
                hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &d_pBuffer);

            if (SUCCEEDED(hr) && d_pBuffer)
            {
                // nDepthMinReliableDistance / nDepthMaxDistance are class members (not shown here)
                for (int i = 0; i < imageSize_d.x * imageSize_d.y; i++)
                {
                    ushort depthPix = d_pBuffer[i];
                    depth[i] = (depthPix >= nDepthMinReliableDistance) && (depthPix <= nDepthMaxDistance) ? (short)depthPix : -1;
                }
            }
        }
        SafeRelease(pDepthFrame);
    }
    else memset(depth, 0, rawDepthImage->dataSize * sizeof(short));

    //out->inputImageType = ITMView::InfiniTAM_FLOAT_DEPTH_IMAGE;
    return /*true*/;
}
Example 15: Update
/// <summary>
/// Main processing function
/// </summary>
void CDepthBasics::Update()
{
    if (!m_pDepthFrameReader)
    {
        return;
    }

    IDepthFrame* pDepthFrame = NULL;
    HRESULT hrDepth = m_pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);
    if (SUCCEEDED(hrDepth))
    {
        INT64 nTime = 0;
        IFrameDescription* pFrameDescription = NULL;
        int nWidth = 0;
        int nHeight = 0;
        USHORT nDepthMinReliableDistance = 0;
        USHORT nDepthMaxDistance = 0;
        UINT nBufferSize = 0;
        UINT16 *pBuffer = NULL;

        HRESULT hr = pDepthFrame->get_RelativeTime(&nTime);
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
        }
        if (SUCCEEDED(hr))
        {
            hr = pFrameDescription->get_Width(&nWidth);
        }
        if (SUCCEEDED(hr))
        {
            hr = pFrameDescription->get_Height(&nHeight);
        }
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
        }
        if (SUCCEEDED(hr))
        {
            // In order to see the full range of depth (including the less reliable far field depth)
            // we are setting nDepthMaxDistance to the extreme potential depth threshold
            nDepthMaxDistance = USHRT_MAX;
            // Note: If you wish to filter by reliable depth distance, uncomment the following line.
            //// hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxDistance);
        }
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
        }
        if (SUCCEEDED(hr))
        {
            ProcessDepth(nTime, pBuffer, nWidth, nHeight, nDepthMinReliableDistance, nDepthMaxDistance);
        }
        SafeRelease(pFrameDescription);
    }
    SafeRelease(pDepthFrame);
}