This article collects typical usage examples of the C++ method INuiFrameTexture::Release. If you are wondering what INuiFrameTexture::Release does in practice, how to call it, or where to find examples of it, the hand-picked code samples below may help. You can also explore further usage examples of its containing class, INuiFrameTexture.
The following shows 6 code examples of the INuiFrameTexture::Release method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
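Across all six examples below, Release sits at the end of the same acquisition sequence: pull a frame from the stream, obtain and lock its pixel texture, read the data, then unlock and release the texture before releasing the frame itself. As a quick orientation, here is that sequence condensed into a minimal sketch; pSensor and hDepthStream are assumed to be an already-initialized INuiSensor* and an open depth stream handle, and the function name is ours, not part of any of the samples.
// Minimal sketch of the acquire/lock/unlock/release sequence shared by the examples below.
// Assumes an initialized INuiSensor* (pSensor) and an open depth stream handle (hDepthStream).
void ReadOneDepthFrame(INuiSensor* pSensor, HANDLE hDepthStream)
{
    NUI_IMAGE_FRAME imageFrame;
    if (FAILED(pSensor->NuiImageStreamGetNextFrame(hDepthStream, 0, &imageFrame)))
    {
        return; // no frame available
    }

    BOOL nearMode;
    INuiFrameTexture* pTexture = NULL;
    if (SUCCEEDED(pSensor->NuiImageFrameGetDepthImagePixelFrameTexture(hDepthStream, &imageFrame, &nearMode, &pTexture)))
    {
        NUI_LOCKED_RECT lockedRect;
        pTexture->LockRect(0, &lockedRect, NULL, 0);
        if (lockedRect.Pitch != 0)
        {
            // ... consume lockedRect.pBits here ...
        }
        pTexture->UnlockRect(0);
        pTexture->Release(); // the call this page documents
    }

    // The frame must be released regardless of whether the texture was obtained.
    pSensor->NuiImageStreamReleaseFrame(hDepthStream, &imageFrame);
}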
Example 1: ProcessDepth
/// <summary>
/// Retrieve depth data from stream frame
/// </summary>
void NuiDepthStream::ProcessDepth()
{
    HRESULT hr;
    NUI_IMAGE_FRAME imageFrame;

    // Attempt to get the depth frame
    hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_hStreamHandle, 0, &imageFrame);
    if (FAILED(hr))
    {
        return;
    }

    if (m_paused)
    {
        // Stream paused. Skip frame processing and release the frame.
        goto ReleaseFrame;
    }

    BOOL nearMode;
    INuiFrameTexture* pTexture;

    // Get the depth image pixel texture
    hr = m_pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(m_hStreamHandle, &imageFrame, &nearMode, &pTexture);
    if (FAILED(hr))
    {
        goto ReleaseFrame;
    }

    NUI_LOCKED_RECT lockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    pTexture->LockRect(0, &lockedRect, NULL, 0);

    // Make sure we've received valid data
    if (lockedRect.Pitch != 0)
    {
        // Convert depth data to a color image and copy it to the image buffer
        m_imageBuffer.CopyDepth(lockedRect.pBits, lockedRect.size, nearMode, m_depthTreatment);

        // Draw out the data with Direct2D
        if (m_pStreamViewer)
        {
            m_pStreamViewer->SetImage(&m_imageBuffer);
        }
    }

    // Done with the texture. Unlock and release it
    pTexture->UnlockRect(0);
    pTexture->Release();

ReleaseFrame:
    // Release the frame
    m_pNuiSensor->NuiImageStreamReleaseFrame(m_hStreamHandle, &imageFrame);
}
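A note on the cleanup structure in Example 1: the goto ReleaseFrame pattern guarantees that NuiImageStreamReleaseFrame runs on every exit path, while UnlockRect and Release only run on the path where the texture was actually obtained. If you prefer not to track that manually, a small scope guard can make Release automatic; the TextureGuard name below is our own illustration, not part of the Kinect SDK.
// Hypothetical RAII helper (not from the original sample): releases an
// INuiFrameTexture when it goes out of scope, so Release cannot be forgotten.
struct TextureGuard
{
    INuiFrameTexture* p;
    explicit TextureGuard(INuiFrameTexture* texture) : p(texture) {}
    ~TextureGuard() { if (p) p->Release(); }
    TextureGuard(const TextureGuard&) = delete;
    TextureGuard& operator=(const TextureGuard&) = delete;
};

// Usage sketch: construct the guard right after the texture is obtained;
// Release() then runs automatically when the enclosing scope ends.
// TextureGuard guard(pTexture);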
Example 2: ProcessDepth
/// <summary>
/// Handle new depth data
/// </summary>
/// <returns>S_OK on success, otherwise failure code</returns>
HRESULT CBackgroundRemovalBasics::ProcessDepth()
{
    HRESULT hr;
    HRESULT bghr = S_OK;
    NUI_IMAGE_FRAME imageFrame;

    // Attempt to get the depth frame
    LARGE_INTEGER depthTimeStamp;
    hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_pDepthStreamHandle, 0, &imageFrame);
    if (FAILED(hr))
    {
        return hr;
    }
    depthTimeStamp = imageFrame.liTimeStamp;

    INuiFrameTexture* pTexture;

    // Attempt to get the extended depth texture
    hr = m_pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(m_pDepthStreamHandle, &imageFrame, &m_bNearMode, &pTexture);
    if (FAILED(hr))
    {
        return hr;
    }

    NUI_LOCKED_RECT LockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    pTexture->LockRect(0, &LockedRect, NULL, 0);

    // Make sure we've received valid data, and then present it to the background removed color stream.
    if (LockedRect.Pitch != 0)
    {
        bghr = m_pBackgroundRemovalStream->ProcessDepth(m_depthWidth * m_depthHeight * cBytesPerPixel, LockedRect.pBits, depthTimeStamp);
    }

    // We're done with the texture, so unlock it. Even if the processing above failed, we still need to unlock and release.
    pTexture->UnlockRect(0);
    pTexture->Release();

    // Release the frame
    hr = m_pNuiSensor->NuiImageStreamReleaseFrame(m_pDepthStreamHandle, &imageFrame);

    if (FAILED(bghr))
    {
        return bghr;
    }
    return hr;
}
Example 3: ProcessDepth
/// <summary>
/// Retrieve depth data from stream frame
/// </summary>
void NuiDepthStream::ProcessDepth()
{
    HRESULT hr;
    NUI_IMAGE_FRAME imageFrame;

    if (m_Recording)
    {
        ////// Initialize a video writer and allocate an image for recording /////////
        if ((m_TimerCount++) % FramesPerFile == 0)
        {
            WCHAR szFilename[MAX_PATH] = { 0 };
            if (SUCCEEDED(GetFileName(szFilename, _countof(szFilename), m_instanceName, DepthSensor)))
            {
                char char_szFilename[MAX_PATH] = {0};
                size_t convertedChars;
                wcstombs_s(&convertedChars, char_szFilename, sizeof(char_szFilename), szFilename, sizeof(char_szFilename));
                m_pwriter = cvCreateVideoWriter(char_szFilename,
                    CV_FOURCC('L', 'A', 'G', 'S'),
                    //-1, // user specified
                    FramesPerSecond, m_ImageRes);
                    //2, m_ImageRes);
            }
            m_TimerCount %= FramesPerFile;
        }
    }

    // Attempt to get the depth frame
    hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_hStreamHandle, 0, &imageFrame);
    if (FAILED(hr))
    {
        return;
    }

    if (m_paused)
    {
        // Stream paused. Skip frame processing and release the frame.
        goto ReleaseFrame;
    }

    BOOL nearMode;
    INuiFrameTexture* pTexture;

    /// FT image texture
    INuiFrameTexture* pFTTexture;
    pFTTexture = imageFrame.pFrameTexture;

    // Get the depth image pixel texture
    hr = m_pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(m_hStreamHandle, &imageFrame, &nearMode, &pTexture);
    if (FAILED(hr))
    {
        goto ReleaseFrame;
    }

    NUI_LOCKED_RECT lockedRect;
    /// FT locked rect
    NUI_LOCKED_RECT FTlockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    pTexture->LockRect(0, &lockedRect, NULL, 0);
    // Lock the FT frame data
    pFTTexture->LockRect(0, &FTlockedRect, NULL, 0);

    // Make sure we've received valid data
    if (lockedRect.Pitch != 0)
    {
        // Convert depth data to a color image and copy it to the image buffer
        m_imageBuffer.CopyDepth(lockedRect.pBits, lockedRect.size, nearMode, m_depthTreatment);

        // Scale the 12-bit depth values down to an 8-bit grayscale image
        NUI_DEPTH_IMAGE_PIXEL* depthBuffer = (NUI_DEPTH_IMAGE_PIXEL*)lockedRect.pBits;
        cv::Mat depthMat(m_ImageRes.height, m_ImageRes.width, CV_8UC1);
        INT cn = 1;
        for (int i = 0; i < depthMat.rows; i++) {
            for (int j = 0; j < depthMat.cols; j++) {
                USHORT realdepth = ((depthBuffer->depth) & 0x0fff); // Take the 12 LSBs for depth
                BYTE intensity = realdepth == 0 || realdepth > 4095 ? 0 : 255 - (BYTE)(((float)realdepth / 4095.0f) * 255.0f); // Scale to 255-level grayscale
                depthMat.data[i*depthMat.cols*cn + j*cn + 0] = intensity;
                depthBuffer++;
            }
        }

        // Copy FT depth data to the IFTImage buffer
        memcpy(m_pFTdepthBuffer->GetBuffer(), PBYTE(FTlockedRect.pBits), std::min(m_pFTdepthBuffer->GetBufferSize(), UINT(pFTTexture->BufferLen())));

        if (m_Recording)
        {
            //*m_pcolorImage = depthMat;
            //cvWriteFrame(m_pwriter, m_pcolorImage);
            const NUI_SKELETON_FRAME* pSkeletonFrame = m_pPrimaryViewer->getSkeleton();
            m_pDepthInbedAPPs->processFrame(depthMat, lockedRect.pBits, m_ImageRes, pSkeletonFrame);
            //if (m_TimerCount % FramesPerFile == 0)
            //{
//......... some code omitted here .........
Example 4: main
//......... some code omitted here .........
    // Enable depth test
    glEnable(GL_DEPTH_TEST);
    // Accept fragment if it is closer to the camera than the former one
    glDepthFunc(GL_LESS);
    glEnable(GL_CULL_FACE);
    glEnable(GL_LIGHTING);
    glEnable(GL_SMOOTH); // OPENGL INSTANTIATION

    HRESULT hr;
    NUI_IMAGE_FRAME depthFrame;
    HANDLE hDepth;
    INuiSensor* pNuiSensor = NULL;

    int iSensorCount = 0;
    hr = NuiGetSensorCount(&iSensorCount);
    if (FAILED(hr))
        return hr;

    // Pick the first connected sensor that reports S_OK
    for (int i = 0; i < iSensorCount; i++)
    {
        INuiSensor* tempSensor;
        hr = NuiCreateSensorByIndex(i, &tempSensor);
        if (FAILED(hr))
            continue;
        hr = tempSensor->NuiStatus();
        if (S_OK == hr)
        {
            pNuiSensor = tempSensor;
            break;
        }
        tempSensor->Release();
    }

    for (int i = 0; i < 2048; i++) {
        depthLookUp[i] = rawDepthToMeters(i);
    }

    rotation = getRotationMatrix(theta, psi, fi);

    pNuiSensor->NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH);
    pNuiSensor->NuiImageStreamOpen(
        NUI_IMAGE_TYPE_DEPTH,
        NUI_IMAGE_RESOLUTION_320x240,
        0,
        2,
        NULL,
        &hDepth); // KINECT INSTANTIATION

    cout << "Starting Main Loop";

    static double lastTime = glfwGetTime();

    // Main loop
    do
    {
        double currentTime = glfwGetTime();
        float deltaTime = float(currentTime - lastTime);

        // Clear color buffer
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

        glUseProgram(grid);
        modelMatrix(MatrixID);
Example 5: ProcessDepth
HRESULT KinectSDKGrabber::ProcessDepth(float *buffer)
{
    HRESULT hr = S_OK;
    NUI_IMAGE_FRAME imageFrame;

    // Attempt to get the depth frame
    hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_pDepthStreamHandle, 0, &imageFrame);
    HRESULT nodata = E_NUI_FRAME_NO_DATA;
    if (FAILED(hr) && hr != E_NUI_FRAME_NO_DATA)
    {
        std::cout << hr << std::endl;
        return hr;
    }

    BOOL nearMode;
    INuiFrameTexture *pTexture = imageFrame.pFrameTexture;
    NUI_LOCKED_RECT LockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    pTexture->LockRect(0, &LockedRect, NULL, 0);

    // Map each depth pixel to its corresponding color-pixel coordinate
    m_pNuiSensor->NuiImageGetColorPixelCoordinateFrameFromDepthPixelFrameAtResolution(
        NUI_IMAGE_RESOLUTION_640x480,
        NUI_IMAGE_RESOLUTION_640x480,
        FrameBufferSize,
        (USHORT *)LockedRect.pBits,
        FrameBufferSize * 2,
        m_colorCoordinates
    );
    pTexture->UnlockRect(0);

    // Get the depth image pixel texture
    hr = m_pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(m_pDepthStreamHandle, &imageFrame, &nearMode, &pTexture);
    if (FAILED(hr))
    {
        goto ReleaseFrame;
    }

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    pTexture->LockRect(0, &LockedRect, NULL, 0);

    // Make sure we've received valid data
    if (LockedRect.Pitch != 0)
    {
        const NUI_DEPTH_IMAGE_PIXEL *pBufferRun = reinterpret_cast<const NUI_DEPTH_IMAGE_PIXEL *>(LockedRect.pBits);
        // end pixel is start + width*height - 1
        const NUI_DEPTH_IMAGE_PIXEL *pBufferEnd = pBufferRun + FrameBufferSize;

        int idx = 0;
        while (pBufferRun < pBufferEnd)
        {
            float depth = pBufferRun->depth;
            if (depth != 0)
            {
                buffer[idx] = depth / 1000.f; // millimeters to meters
            }
            else
            {
                buffer[idx] = std::numeric_limits<float>::quiet_NaN(); // no reading
            }
            ++pBufferRun;
            ++idx;
        }
        hr = S_OK;
    }
    else
    {
        hr = E_FAIL;
    }

    // We're done with the texture so unlock it
    pTexture->UnlockRect(0);
    pTexture->Release();

ReleaseFrame:
    // Release the frame
    m_pNuiSensor->NuiImageStreamReleaseFrame(m_pDepthStreamHandle, &imageFrame);

    return hr;
}
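Because this grabber writes quiet_NaN for pixels with no depth reading, a caller presumably needs to filter or tolerate NaNs before using the buffer. Here is a minimal consumer sketch under that assumption; the function name and the frameBufferSize parameter are ours (in the example the size is the FrameBufferSize constant).
// Sketch: count the valid (non-NaN) depth samples in a buffer filled by ProcessDepth.
#include <cmath>
#include <cstddef>

std::size_t CountValidDepthPixels(const float* buffer, std::size_t frameBufferSize)
{
    std::size_t valid = 0;
    for (std::size_t i = 0; i < frameBufferSize; ++i)
    {
        if (!std::isnan(buffer[i]))
        {
            ++valid; // pixel carries a real depth value, in meters
        }
    }
    return valid;
}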
Example 6: ProcessDepth
void KinectReader::ProcessDepth()
{
    if (!m_pNuiSensor)
    {
        /*
        int delta_depth = _max_depth - _min_depth;
        for ( unsigned int i = 0; i < 640*160; i++ )
            m_depth[i] = (rand()%delta_depth - delta_depth)*0.15f;
        */
        return;
    }

    HRESULT hr;
    NUI_IMAGE_FRAME imageFrame;

    // Attempt to get the depth frame
    hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_pDepthStreamHandle, 0, &imageFrame);
    if (FAILED(hr))
    {
        return;
    }

    BOOL nearMode;
    INuiFrameTexture* pTexture;

    // Get the depth image pixel texture
    hr = m_pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(
        m_pDepthStreamHandle, &imageFrame, &nearMode, &pTexture);
    if (FAILED(hr))
    {
        goto ReleaseFrame;
    }

    NUI_LOCKED_RECT LockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    pTexture->LockRect(0, &LockedRect, NULL, 0);

    // Make sure we've received valid data
    if (LockedRect.Pitch != 0)
    {
        // Get the min and max reliable depth for the current frame
        int minDepth = _min_depth; //(nearMode ? NUI_IMAGE_DEPTH_MINIMUM_NEAR_MODE : NUI_IMAGE_DEPTH_MINIMUM) >> NUI_IMAGE_PLAYER_INDEX_SHIFT;
        int maxDepth = _max_depth; //(nearMode ? NUI_IMAGE_DEPTH_MAXIMUM_NEAR_MODE : NUI_IMAGE_DEPTH_MAXIMUM) >> NUI_IMAGE_PLAYER_INDEX_SHIFT;

        float * float_run = m_depth;
        const NUI_DEPTH_IMAGE_PIXEL * pBufferRun = reinterpret_cast<const NUI_DEPTH_IMAGE_PIXEL *>(LockedRect.pBits);

        // end pixel is start + width*height - 1
        const NUI_DEPTH_IMAGE_PIXEL * pBufferEnd = pBufferRun + (cDepthWidth * cDepthHeight);

        float intensity = 0.0f;
        int delta_depth = maxDepth - minDepth;
        float float_per_depth_unit = _active_depth / float(delta_depth);

        static int t = 0;
        ++t;

        while (pBufferRun < pBufferEnd)
        {
            // discard the portion of the depth that contains only the player index
            USHORT depth = pBufferRun->depth;

            // To convert to a byte, we're discarding the most-significant
            // rather than least-significant bits.
            // We're preserving detail, although the intensity will "wrap."
            // Values outside the reliable depth range are mapped to 0 (black).
            // Note: Using conditionals in this loop could degrade performance.
            // Consider using a lookup table instead when writing production code.
            //if (depth)
            intensity = static_cast<float>(depth >= minDepth && depth < maxDepth ? ((depth - minDepth) - delta_depth) * float_per_depth_unit : 0.0f);
            //if ( t < 1000)
            //    intensity = (rand()%delta_depth - delta_depth)*float_per_depth_unit;

            // Write out the intensity value
            *(float_run++) = intensity;

            // Increment our index into the Kinect's depth buffer
            ++pBufferRun;
        }

        // Draw the data with Direct2D
        //m_pDrawDepth->Draw(m_depthRGBX, cDepthWidth * cDepthHeight * cBytesPerPixel);
        // image is ready
    }

    // We're done with the texture so unlock it
    pTexture->UnlockRect(0);
    pTexture->Release();

ReleaseFrame:
    //return;
    // Release the frame
    m_pNuiSensor->NuiImageStreamReleaseFrame(m_pDepthStreamHandle, &imageFrame);
}
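The last example leaves the scaled intensities in the float buffer m_depth rather than drawing them (the Direct2D call is commented out). If you wanted to visualize that buffer, one option is to map it to an 8-bit grayscale image; the sketch below is ours, assumes the values have already been normalized to [0, 1], and reuses the cDepthWidth and cDepthHeight constants from the example.
// Sketch (not from the original source): convert a normalized float intensity
// buffer into 8-bit grayscale, clamping anything outside [0, 1].
#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<unsigned char> ToGray8(const float* intensities, int width, int height)
{
    std::vector<unsigned char> gray(static_cast<std::size_t>(width) * height);
    for (std::size_t i = 0; i < gray.size(); ++i)
    {
        float v = std::min(std::max(intensities[i], 0.0f), 1.0f); // clamp to [0, 1]
        gray[i] = static_cast<unsigned char>(v * 255.0f + 0.5f);  // round to nearest byte
    }
    return gray;
}

// Usage sketch: ToGray8(m_depth, cDepthWidth, cDepthHeight);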