

C++ IDepthFrame::Release Method Code Examples

This article collects typical usage examples of the C++ IDepthFrame::Release method. If you are wondering what IDepthFrame::Release does, how to call it, or where to find sample code for it, the curated examples below should help. You can also explore the other usage examples of IDepthFrame, the interface this method belongs to.


The sections below show 13 code examples of IDepthFrame::Release, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
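
All of the examples below follow the same lifecycle: acquire a frame from a reader, use its buffer, then call Release (usually also resetting the pointer to nullptr) so the underlying COM object can be freed. As a quick orientation before the examples, here is a minimal sketch of that pattern; it is not taken from any of the projects listed here, and the SafeRelease helper and PollDepthOnce function are illustrative names modeled on the common Kinect v2 SDK sample idiom:

#include <Windows.h>
#include <Kinect.h>

// Release a COM interface pointer and reset it so it cannot be used after release.
template<class T>
inline void SafeRelease(T*& p)
{
	if (p != nullptr)
	{
		p->Release();
		p = nullptr;
	}
}

// Poll one depth frame from an already-opened reader (illustrative sketch).
void PollDepthOnce(IDepthFrameReader* pReader)
{
	IDepthFrame* pFrame = nullptr;
	if (pReader != nullptr && pReader->AcquireLatestFrame(&pFrame) == S_OK)
	{
		UINT uSize = 0;
		UINT16* pBuffer = nullptr;
		if (pFrame->AccessUnderlyingBuffer(&uSize, &pBuffer) == S_OK)
		{
			// ... use pBuffer / uSize here ...
		}

		// IDepthFrame::Release via the helper; the raw pFrame->Release()
		// calls in the examples below are equivalent.
		SafeRelease(pFrame);
	}
}

Forgetting the Release call (or calling it twice) is the most common source of leaks and access violations with this API, which is why several of the examples also reset the frame pointer to nullptr immediately after releasing it.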

Example 1: idle

void idle()
{
	// Read color data
	IColorFrame* pCFrame = nullptr;
	if (pColorFrameReader->AcquireLatestFrame(&pCFrame) == S_OK)
	{
		pCFrame->CopyConvertedFrameDataToArray(uColorBufferSize, pColorBuffer, ColorImageFormat_Rgba);

		pCFrame->Release();
		pCFrame = nullptr;
	}

	// Read depth data
	IDepthFrame* pDFrame = nullptr;
	if (pDepthFrameReader->AcquireLatestFrame(&pDFrame) == S_OK)
	{
		pDFrame->CopyFrameDataToArray(uDepthPointNum, pDepthBuffer);

		pDFrame->Release();
		pDFrame = nullptr;

		// map to camera space
		pCoordinateMapper->MapColorFrameToCameraSpace(uDepthPointNum, pDepthBuffer, uColorPointNum, pCSPoints);
	}
}
Developer: hitsjt, Project: Kinect-project, Lines: 25, Source: capture.cpp

Example 2: glBindTexture

// Retrieve the depth data
GLuint KinectV2::getDepth() const
{
  // Bind the depth texture
  glBindTexture(GL_TEXTURE_2D, depthTexture);

  // If the next depth frame has arrived
  IDepthFrame *depthFrame;
  if (depthReader->AcquireLatestFrame(&depthFrame) == S_OK)
  {
    // Get the size and location of the depth data
    UINT depthSize;
    UINT16 *depthBuffer;
    depthFrame->AccessUnderlyingBuffer(&depthSize, &depthBuffer);

    // Compute the color texture coordinates and transfer them to the buffer object
    glBindBuffer(GL_ARRAY_BUFFER, coordBuffer);
    ColorSpacePoint *const texcoord(static_cast<ColorSpacePoint *>(glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY)));
    coordinateMapper->MapDepthFrameToColorSpace(depthCount, depthBuffer, depthCount, texcoord);
    glUnmapBuffer(GL_ARRAY_BUFFER);

    // Transfer the depth data to the texture
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, depthWidth, depthHeight, GL_RED, GL_UNSIGNED_SHORT, depthBuffer);

    // Release the depth frame
    depthFrame->Release();
  }

  return depthTexture;
}
Developer: tokoik, Project: projection, Lines: 30, Source: KinectV2.cpp

Example 3: z

// Retrieve the camera-space coordinates
GLuint KinectV2::getPoint() const
{
  // Bind the camera-space coordinate texture
  glBindTexture(GL_TEXTURE_2D, pointTexture);

  // If the next depth frame has arrived
  IDepthFrame *depthFrame;
  if (depthReader->AcquireLatestFrame(&depthFrame) == S_OK)
  {
    // Get the size and location of the depth data
    UINT depthSize;
    UINT16 *depthBuffer;
    depthFrame->AccessUnderlyingBuffer(&depthSize, &depthBuffer);

    // Compute the color texture coordinates and transfer them to the buffer object
    glBindBuffer(GL_ARRAY_BUFFER, coordBuffer);
    ColorSpacePoint *const texcoord(static_cast<ColorSpacePoint *>(glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY)));
    coordinateMapper->MapDepthFrameToColorSpace(depthCount, depthBuffer, depthCount, texcoord);
    glUnmapBuffer(GL_ARRAY_BUFFER);

    // Get the depth-to-camera-space conversion table
    UINT32 entry;
    PointF *table;
    coordinateMapper->GetDepthFrameToCameraSpaceTable(&entry, &table);

    // For every point
    for (unsigned int i = 0; i < entry; ++i)
    {
      // Scale factor converting depth values to meters
      static const GLfloat zScale(-0.001f);

      // Get the depth value of this point
      const unsigned short d(depthBuffer[i]);

      // Convert the depth value to meters (unmeasurable points are set to maxDepth)
      const GLfloat z(d == 0 ? -maxDepth : GLfloat(d) * zScale);

      // Compute the on-screen position of this point
      const GLfloat x(table[i].X);
      const GLfloat y(-table[i].Y);

      // Compute the camera-space coordinates of this point
      position[i][0] = x * z;
      position[i][1] = y * z;
      position[i][2] = z;
    }

    // Transfer the camera-space coordinates
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, depthWidth, depthHeight, GL_RGB, GL_FLOAT, position);

    // Release the table
    CoTaskMemFree(table);

    // Release the depth frame
    depthFrame->Release();
  }

  return pointTexture;
}
Developer: tokoik, Project: projection, Lines: 60, Source: KinectV2.cpp

Example 4: getFrameData

bool KinectInterface::getFrameData(IMultiSourceFrame* frame, cv::Mat& intensity_mat, cv::Mat& depth_mat, cv::Mat& pos_mat) {
	//Obtain depth frame
	IDepthFrame* depthframe = nullptr;
	if (FAILED(depthFrameReader->AcquireLatestFrame(&depthframe))) return false;
	if (!depthframe) return false;
	// Get data from frame
	unsigned int sz;
	unsigned short* buf;
	if (FAILED(depthframe->AccessUnderlyingBuffer(&sz, &buf))) return false;
	//get depth -> xyz mapping
	if (FAILED(mapper->MapDepthFrameToCameraSpace(width*height, buf, width*height, depth2xyz))) return false;
	//get depth -> rgb image mapping 
	if (FAILED(mapper->MapDepthFrameToColorSpace(width*height, buf, width*height, depth2rgb))) return false;
	//save depth
	if (FAILED(depthframe->CopyFrameDataToArray(height * width, depth_data))) {
		depthframe->Release();
		return false;
	}

	if (depthframe) depthframe->Release();


	//Obtain RGB frame
	IColorFrame* colorframe;
	if (FAILED(colorFrameReader->AcquireLatestFrame(&colorframe))) return false;
	if (!colorframe) return false;

	// Get data from frame
	if (FAILED(colorframe->CopyConvertedFrameDataToArray(colorwidth*colorheight * 4, rgbimage, ColorImageFormat_Rgba))) return false;


	cv::Mat tmp_depth = cv::Mat::zeros(colorheight, colorwidth, CV_16UC1);
	cv::Mat tmp_pos = cv::Mat::zeros(colorheight, colorwidth, CV_32FC3);
	cv::Mat depth_org(height, width, CV_16UC1, depth_data);
	cv::Mat tmp_rgb(colorheight, colorwidth, CV_8UC4, rgbimage);

	// Write color array for vertices
	for (int i = 0; i < width*height; i++) {
		ColorSpacePoint p = depth2rgb[i];
		int iY = (int)(p.Y + 0.5);
		int iX = (int)(p.X + 0.5);
		if (iX >= 0 && iY >= 0 && iX < colorwidth && iY < colorheight) {
			// Check if color pixel coordinates are in bounds
			tmp_depth.at<unsigned short>(iY, iX) = depth_data[i];
			//tmp_pos.at<float>(iY, iX, 0) = depth2xyz[i].X;
			//tmp_pos.at<float>(iY, iX, 1) = depth2xyz[i].Y;
			//tmp_pos.at<float>(iY, iX, 2) = depth2xyz[i].Z;
		}
	}

	if (colorframe) colorframe->Release();

	cv::resize(tmp_rgb(cv::Rect(240, 0, 1440, 1080)), intensity_mat, cv::Size(640, 480));
	cv::resize(tmp_depth(cv::Rect(240, 0, 1440, 1080)), depth_mat, cv::Size(640, 480));
	cv::resize(tmp_pos(cv::Rect(240, 0, 1440, 1080)), pos_mat, cv::Size(640, 480));
	cv::cvtColor(intensity_mat, intensity_mat, CV_RGBA2GRAY);
	return true;
}
Developer: danfeiX, Project: interaction_flow, Lines: 55, Source: kinect_interface_win.cpp

Example 5: getDepthData

void MKinect::getDepthData(IMultiSourceFrame* frame, float* dest) {
	IDepthFrame* depthframe;
	IDepthFrameReference* frameref = NULL;
	frame->get_DepthFrameReference(&frameref);
	frameref->AcquireFrame(&depthframe);
	if (frameref) frameref->Release();
	if (!depthframe) return;
	// Get data from frame
	unsigned int sz;
	UINT16 * buf;
	while (!SUCCEEDED(depthframe->AccessUnderlyingBuffer(&sz, &buf))) {

	}
	HRESULT res = S_OK;
	res = mapper->MapDepthFrameToCameraSpace(
		KinectColorWidth*KinectColorHeight, buf,        // Depth frame data and size of depth frame
		KinectColorWidth*KinectColorHeight, depth2xyz); // Output CameraSpacePoint array and size
	// Process depth frame data...
	
	if (depthframe) depthframe->Release();
}
Developer: martincrb, Project: Face_tracking, Lines: 21, Source: MKinect.cpp

Example 6: main

int main(int argc, char** argv)
{
	// 1a. Get default Sensor
	cout << "Try to get default sensor" << endl;
	IKinectSensor* pSensor = nullptr;
	if (GetDefaultKinectSensor(&pSensor) != S_OK)
	{
		cerr << "Get Sensor failed" << endl;
	}
	else
	{
		// 1b. Open sensor
		cout << "Try to open sensor" << endl;
		if (pSensor->Open() != S_OK)
		{
			cerr << "Can't open sensor" << endl;
		}
		else
		{
			// 2a. Get frame source
			cout << "Try to get source" << endl;
			IDepthFrameSource* pFrameSource = nullptr;
			if (pSensor->get_DepthFrameSource(&pFrameSource) != S_OK)
			{
				cerr << "Can't get frame source" << endl;
			}
			else
			{
				// 2b. Get frame description
				int		iWidth = 0;
				int		iHeight = 0;
				IFrameDescription* pFrameDescription = nullptr;
				if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK)
				{
					pFrameDescription->get_Width(&iWidth);
					pFrameDescription->get_Height(&iHeight);
					pFrameDescription->Release();
					pFrameDescription = nullptr;
				}

				// 2c. get some depth-only metadata
				UINT16 uDepthMin = 0, uDepthMax = 0;
				pFrameSource->get_DepthMinReliableDistance(&uDepthMin);
				pFrameSource->get_DepthMaxReliableDistance(&uDepthMax);
				cout << "Reliable Distance: " << uDepthMin << " - " << uDepthMax << endl;

				// prepare OpenCV
				cv::Mat mDepthImg(iHeight, iWidth, CV_16UC1);
				cv::Mat mImg8bit(iHeight, iWidth, CV_8UC1);
				cv::namedWindow( "Depth Map" );

				// 3a. get frame reader
				cout << "Try to get frame reader" << endl;
				IDepthFrameReader* pFrameReader = nullptr;
				if (pFrameSource->OpenReader(&pFrameReader) != S_OK)
				{
					cerr << "Can't get frame reader" << endl;
				}
				else
				{
					// Enter main loop
					cout << "Enter main loop" << endl;
					while (true)
					{
						// 4a. Get last frame
						IDepthFrame* pFrame = nullptr;
						if (pFrameReader->AcquireLatestFrame(&pFrame) == S_OK)
						{
							// 4c. copy the depth map to image
							if (pFrame->CopyFrameDataToArray(iWidth * iHeight, reinterpret_cast<UINT16*>(mDepthImg.data)) == S_OK)
							{
								// 4d. convert from 16bit to 8bit
								mDepthImg.convertTo(mImg8bit, CV_8U, 255.0f / uDepthMax);
								cv::imshow("Depth Map", mImg8bit);
							}
							else
							{
								cerr << "Data copy error" << endl;
							}

							// 4e. release frame
							pFrame->Release();
						}

						// 4f. check keyboard input
						if (cv::waitKey(30) == VK_ESCAPE){
							break;
						}
					}

					// 3b. release frame reader
					cout << "Release frame reader" << endl;
					pFrameReader->Release();
					pFrameReader = nullptr;
				}

				// 2d. release Frame source
				cout << "Release frame source" << endl;
				pFrameSource->Release();
				pFrameSource = nullptr;
//......... part of the code omitted .........
Developer: AMBITIONBIN, Project: KinectForWindows2Sample, Lines: 101, Source: DepthWithOpenCV.cpp

Example 7: update

void Device::update()
{
	if ( mSensor != 0 ) {
		mSensor->get_Status( &mStatus );
	}

	if ( mFrameReader == 0 ) {
		return;
	}

	IAudioBeamFrame* audioFrame								= 0;
	IBodyFrame* bodyFrame									= 0;
	IBodyIndexFrame* bodyIndexFrame							= 0;
	IColorFrame* colorFrame									= 0;
	IDepthFrame* depthFrame									= 0;
	IMultiSourceFrame* frame								= 0;
	IInfraredFrame* infraredFrame							= 0;
	ILongExposureInfraredFrame* infraredLongExposureFrame	= 0;
	
	HRESULT hr = mFrameReader->AcquireLatestFrame( &frame );

	// TODO audio
	if ( SUCCEEDED( hr ) ) {
		console() << "SUCCEEDED " << getElapsedFrames() << endl;
	}

	if ( SUCCEEDED( hr ) && mDeviceOptions.isBodyEnabled() ) {
		IBodyFrameReference* frameRef = 0;
		hr = frame->get_BodyFrameReference( &frameRef );
		if ( SUCCEEDED( hr ) ) {
			hr = frameRef->AcquireFrame( &bodyFrame );
		}
		if ( frameRef != 0 ) {
			frameRef->Release();
			frameRef = 0;
		}
	}

	if ( SUCCEEDED( hr ) && mDeviceOptions.isBodyIndexEnabled() ) {
		IBodyIndexFrameReference* frameRef = 0;
		hr = frame->get_BodyIndexFrameReference( &frameRef );
		if ( SUCCEEDED( hr ) ) {
			hr = frameRef->AcquireFrame( &bodyIndexFrame );
		}
		if ( frameRef != 0 ) {
			frameRef->Release();
			frameRef = 0;
		}
	}

	if ( SUCCEEDED( hr ) && mDeviceOptions.isColorEnabled() ) {
		IColorFrameReference* frameRef = 0;
		hr = frame->get_ColorFrameReference( &frameRef );
		if ( SUCCEEDED( hr ) ) {
			hr = frameRef->AcquireFrame( &colorFrame );
		}
		if ( frameRef != 0 ) {
			frameRef->Release();
			frameRef = 0;
		}
	}

	if ( SUCCEEDED( hr ) && mDeviceOptions.isDepthEnabled() ) {
		IDepthFrameReference* frameRef = 0;
		hr = frame->get_DepthFrameReference( &frameRef );
		if ( SUCCEEDED( hr ) ) {
			hr = frameRef->AcquireFrame( &depthFrame );
		}
		if ( frameRef != 0 ) {
			frameRef->Release();
			frameRef = 0;
		}
	}

	if ( SUCCEEDED( hr ) && mDeviceOptions.isInfraredEnabled() ) {
		IInfraredFrameReference* frameRef = 0;
		hr = frame->get_InfraredFrameReference( &frameRef );
		if ( SUCCEEDED( hr ) ) {
			hr = frameRef->AcquireFrame( &infraredFrame );
		}
		if ( frameRef != 0 ) {
			frameRef->Release();
			frameRef = 0;
		}
	}

	if ( SUCCEEDED( hr ) && mDeviceOptions.isInfraredLongExposureEnabled() ) {
		ILongExposureInfraredFrameReference* frameRef = 0;
		hr = frame->get_LongExposureInfraredFrameReference( &frameRef );
		if ( SUCCEEDED( hr ) ) {
			hr = frameRef->AcquireFrame( &infraredLongExposureFrame );
		}
		if ( frameRef != 0 ) {
			frameRef->Release();
			frameRef = 0;
		}
	}

	if ( SUCCEEDED( hr ) ) {
		long long time											= 0L;
//......... part of the code omitted .........
Developer: naychrist, Project: Cinder-Kinect2, Lines: 101, Source: Kinect2.cpp

Example 8: main


//......... part of the code omitted .........
	Mat frame(h,w, CV_8UC3, Scalar(255,255,255));
	Mat display;
	//Mat img;

	char k = 0;
	while(k!=27){

		HRESULT hResult = S_OK;

		if(displayColor){
			IColorFrame* pColorFrame = nullptr;
			hResult = pColorReader->AcquireLatestFrame( &pColorFrame );
			while(!SUCCEEDED(hResult)){
				Sleep(50);
				hResult = pColorReader->AcquireLatestFrame(&pColorFrame);
			}

			if( SUCCEEDED( hResult ) ){
				hResult = pColorFrame->CopyConvertedFrameDataToArray( colorBufferSize, reinterpret_cast<BYTE*>( colorBufferMat.data ), ColorImageFormat::ColorImageFormat_Bgra );
				if( !SUCCEEDED( hResult ) ){
					return false;
				}

				resize(colorBufferMat,display,Size(displaySize*w,displaySize*h));

				flip(display,display,1);

				cv::line(display,Point(displaySize*w/2,0),Point(displaySize*w/2,displaySize*h),Scalar(0,0,255),2);
				cv::line(display,Point(0,displaySize*h/2),Point(displaySize*w,displaySize*h/2),Scalar(0,0,255),2);


				if (pColorFrame )
				{
					pColorFrame ->Release();
					pColorFrame  = NULL;
				}



			}
			else 
				return false;



		}
		else
		{

			IDepthFrame* pDepthFrame = nullptr;
			hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame );
			while(!SUCCEEDED(hResult)){
				Sleep(10);
				hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame );
			}

			if( SUCCEEDED( hResult ) ){
				unsigned int bufferSize = 0;
				unsigned short* buffer = nullptr;
				hResult = pDepthFrame->AccessUnderlyingBuffer( &bufferSize, &buffer );

				if( SUCCEEDED( hResult ) ){
					for( int y = 0; y < h; y++ ){
						for( int x = 0; x < w; x++ ){
							Vec3b intensity = frame.at<Vec3b>(y, x);
							if(buffer[ y * w + (w - x - 1) ]  < hauteurCamera){
Developer: nickos64252, Project: Fountain, Lines: 67, Source: SourceCalibration.cpp

Example 9: if

void* Kinect2StreamImpl::populateFrameBuffer(int& buffWidth, int& buffHeight)
{
  buffWidth = 0;
  buffHeight = 0;

  if (m_sensorType == ONI_SENSOR_COLOR) {
    if (m_pFrameReader.color && m_pFrameBuffer.color) {
      buffWidth = 1920;
      buffHeight = 1080;

      IColorFrame* frame = NULL;
      HRESULT hr = m_pFrameReader.color->AcquireLatestFrame(&frame);
      if (SUCCEEDED(hr)) {
        ColorImageFormat imageFormat = ColorImageFormat_None;
        hr = frame->get_RawColorImageFormat(&imageFormat);
        if (SUCCEEDED(hr)) {
          if (imageFormat == ColorImageFormat_Bgra) {
            RGBQUAD* data;
            UINT bufferSize;
            frame->AccessRawUnderlyingBuffer(&bufferSize, reinterpret_cast<BYTE**>(&data));
            memcpy(m_pFrameBuffer.color, data, 1920*1080*sizeof(RGBQUAD));
          }
          else {
            frame->CopyConvertedFrameDataToArray(1920*1080*sizeof(RGBQUAD), reinterpret_cast<BYTE*>(m_pFrameBuffer.color), ColorImageFormat_Bgra);
          }
        }
      }
      if (frame) {
        frame->Release();
      }

      return reinterpret_cast<void*>(m_pFrameBuffer.color);
    }
  }
  else if (m_sensorType == ONI_SENSOR_DEPTH) {
    if (m_pFrameReader.depth && m_pFrameBuffer.depth) {
      buffWidth = 512;
      buffHeight = 424;

      IDepthFrame* frame = NULL;
      HRESULT hr = m_pFrameReader.depth->AcquireLatestFrame(&frame);
      if (SUCCEEDED(hr)) {
        UINT16* data;
        UINT bufferSize;
        frame->AccessUnderlyingBuffer(&bufferSize, &data);
        memcpy(m_pFrameBuffer.depth, data, 512*424*sizeof(UINT16));
      }
      if (frame) {
        frame->Release();
      }

      return reinterpret_cast<void*>(m_pFrameBuffer.depth);
    }
  }
  else { // ONI_SENSOR_IR
    if (m_pFrameReader.infrared && m_pFrameBuffer.infrared) {
      buffWidth = 512;
      buffHeight = 424;

      IInfraredFrame* frame = NULL;
      HRESULT hr = m_pFrameReader.infrared->AcquireLatestFrame(&frame);
      if (SUCCEEDED(hr)) {
        UINT16* data;
        UINT bufferSize;
        frame->AccessUnderlyingBuffer(&bufferSize, &data);
        memcpy(m_pFrameBuffer.infrared, data, 512*424*sizeof(UINT16));
      }
      if (frame) {
        frame->Release();
      }

      return reinterpret_cast<void*>(m_pFrameBuffer.infrared);
    }
  }

  return NULL;
}
Developer: mvm9289, Project: openni2_kinect2_driver, Lines: 77, Source: Kinect2StreamImpl.cpp

Example 10: listen

void KinectDevice::listen() {
    if (_listening) throw std::exception("Already listening for new frames");

    _listening = true;
    while (_listening) {
        int idx = WaitForSingleObject((HANDLE) _frameEvent, 100);
        switch (idx) {
        case WAIT_TIMEOUT:
            std::cout << ".";
            continue;
        case WAIT_OBJECT_0:
            IMultiSourceFrameArrivedEventArgs *frameArgs = nullptr;
            IMultiSourceFrameReference *frameRef = nullptr;
            HRESULT hr = _reader->GetMultiSourceFrameArrivedEventData(_frameEvent, &frameArgs);

            if (hr == S_OK) {
                hr = frameArgs->get_FrameReference(&frameRef);
                frameArgs->Release();
            }

            if (hr == S_OK) {
                //if (_lastFrame) _lastFrame->Release();
                hr = frameRef->AcquireFrame(&_lastFrame);
                frameRef->Release();
            }

            if (hr == S_OK) {
                // Store frame data
                //std::cout << "Got a frame YEAH" << std::endl;

                IDepthFrameReference                *depthRef   = nullptr;
                IColorFrameReference                *colorRef   = nullptr;
                IInfraredFrameReference             *irRef      = nullptr;
                ILongExposureInfraredFrameReference *hdirRef    = nullptr;
                IBodyIndexFrameReference            *indexRef   = nullptr;

                IDepthFrame                         *depth      = nullptr;
                IColorFrame                         *color      = nullptr;
                IInfraredFrame                      *ir         = nullptr;
                ILongExposureInfraredFrame          *hdir       = nullptr;
                IBodyIndexFrame                     *index      = nullptr;

                UINT size;
                uint16_t *buff;
                BYTE *cbuff;

                frameLock.lock();
                if (_streams & Streams::DEPTH_STREAM) {
                    _lastFrame->get_DepthFrameReference(&depthRef);
                    depthRef->AcquireFrame(&depth);
                    
                    if (depth) {
                        depthSwap();
                        depth->AccessUnderlyingBuffer(&size, &buff);
                        memcpy(depthData.get(), buff, size * sizeof(uint16_t));
                        depth->Release();
                    }
                    
                    depthRef->Release();
                }
                if (_streams & Streams::COLOR_STREAM) {
                    _lastFrame->get_ColorFrameReference(&colorRef);
                    colorRef->AcquireFrame(&color);
                    //color->AccessUnderlyingBuffer(&size, &buff);
                    //memcpy(_colorData.get(), buff, size);
                    color->Release();
                    colorRef->Release();
                }
                if (_streams & Streams::IR_STREAM) {
                    _lastFrame->get_InfraredFrameReference(&irRef);
                    irRef->AcquireFrame(&ir);
                    ir->AccessUnderlyingBuffer(&size, &buff);
                    memcpy(irData.get(), buff, size);
                    ir->Release();
                    irRef->Release();
                }
                if (_streams & Streams::HDIR_STREAM) {
                    _lastFrame->get_LongExposureInfraredFrameReference(&hdirRef);
                    hdirRef->AcquireFrame(&hdir);
                    hdir->AccessUnderlyingBuffer(&size, &buff);
                    memcpy(hdirData.get(), buff, size);
                    hdir->Release();
                    hdirRef->Release();
                }
                if (_streams & Streams::INDEX_STREAM) {
                    _lastFrame->get_BodyIndexFrameReference(&indexRef); 
                    indexRef->AcquireFrame(&index);
                    index->AccessUnderlyingBuffer(&size, &cbuff);
                    memcpy(indexData.get(), cbuff, size);
                    index->Release();
                    indexRef->Release();
                }

                frameLock.unlock();

                _lastFrame->Release();
            }
        }
    }
}
Developer: AliShug, Project: Kinecting, Lines: 100, Source: KinectDevice.cpp

Example 11: CreateSensorTexture

int TextureManager::CreateSensorTexture(char *errorString, const char *name) {
	if (!depth_frame_reader_) {
		sprintf_s(errorString, MAX_ERROR_LENGTH,
			"No depth sensor exists for texture creation");
		return -1;
	}

	glGenTextures(1, textureID + numTextures);
	strcpy_s(textureName[numTextures], TM_MAX_FILENAME_LENGTH, name);
	glBindTexture(GL_TEXTURE_2D, textureID[numTextures]);
	glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	//gluBuild2DMipmaps(GL_TEXTURE_2D, GL_RGBA,
	//	TM_NOISE_TEXTURE_SIZE, TM_NOISE_TEXTURE_SIZE,
	//	GL_BGRA, GL_UNSIGNED_BYTE, noiseIntData);

	IDepthFrame* pDepthFrame = NULL;
	HRESULT hr;

	bool hasSucceeded = false;
	for (int tries = 0; tries < 20 && !hasSucceeded; tries++) {
		Sleep(100);
		hr = depth_frame_reader_->AcquireLatestFrame(&pDepthFrame);
		if (SUCCEEDED(hr)) hasSucceeded = true;
	}
	if (!hasSucceeded) {
		sprintf_s(errorString, MAX_ERROR_LENGTH,
			"Could not acquire last depth sensor frame");
		return -1;
	}

	pDepthFrame->get_RelativeTime(&last_frame_time_);

	IFrameDescription* pFrameDescription = NULL;
	int nWidth = 0;
	int nHeight = 0;

	hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
	if (FAILED(hr)) {
		pDepthFrame->Release();
		sprintf_s(errorString, MAX_ERROR_LENGTH,
			"Could not get Depth Frame description");
		return -1;
	}

	pFrameDescription->get_Width(&nWidth);
	pFrameDescription->get_Height(&nHeight);
    depth_sensor_width_ = nWidth;
    depth_sensor_height_ = nHeight;

	if (cpu_depth_sensor_buffer_) delete[] cpu_depth_sensor_buffer_;
	cpu_depth_sensor_buffer_ = new float[nWidth * nHeight];
	memset(cpu_depth_sensor_buffer_, 0, nWidth * nHeight * sizeof(float));
	if (smoothed_depth_sensor_buffer_[0]) {
		delete[] smoothed_depth_sensor_buffer_[0];
		delete[] smoothed_depth_sensor_buffer_[1];
	}
	smoothed_depth_sensor_buffer_[0] = new float[nWidth * nHeight];
	smoothed_depth_sensor_buffer_[1] = new float[nWidth * nHeight];
	memset(smoothed_depth_sensor_buffer_[0], 0,
		   nWidth*nHeight*sizeof(smoothed_depth_sensor_buffer_[0][0]));
	memset(smoothed_depth_sensor_buffer_[1], 0,
		   nWidth*nHeight*sizeof(smoothed_depth_sensor_buffer_[1][0]));

	glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F,
		nWidth, nHeight,
		0, GL_RED, GL_FLOAT, smoothed_depth_sensor_buffer_[0]);

	textureWidth[numTextures] = nWidth;
	textureHeight[numTextures] = nHeight;
	numTextures++;

	pFrameDescription->Release();
	pDepthFrame->Release();

	return 0;
}
Developer: chock-mostlyharmless, Project: mostlyharmless, Lines: 79, Source: TextureManager.cpp

Example 12: main

int main(int argc, char** argv)
{
   int first_time = 0;
   Size screen_size(1440, 900);//the dst image size,e.g.100x100
   Scalar text_color = Scalar(0, 255, 0);
   Scalar text_color2 = Scalar(0, 255, 255);
   Scalar text_color3 = Scalar(0, 0, 255);

   inhaler_coach coach;
   coach.control = 0;
   thread mThread(test_func, &coach);

   // 1a. Get Kinect Sensor
   cout << "Try to get default sensor" << endl;
   IKinectSensor* pSensor = nullptr;
   if (GetDefaultKinectSensor(&pSensor) != S_OK)
   {
      cerr << "Get Sensor failed" << endl;
      return -1;
   }

   // 1b. Open sensor
   cout << "Try to open sensor" << endl;
   if (pSensor->Open() != S_OK)
   {
      cerr << "Can't open sensor" << endl;
      return -1;
   }

   // 2. Color Related code
   IColorFrameReader* pColorFrameReader = nullptr;
   cv::Mat	mColorImg;
   UINT uBufferSize = 0;
   UINT uColorPointNum = 0;
   int iWidth = 0;
   int iHeight = 0;
   {
      // 2a. Get color frame source
      cout << "Try to get color source" << endl;
      IColorFrameSource* pFrameSource = nullptr;
      if (pSensor->get_ColorFrameSource(&pFrameSource) != S_OK)
      {
         cerr << "Can't get color frame source" << endl;
         return -1;
      }

      // 2b. Get frame description
      cout << "get color frame description" << endl;
      IFrameDescription* pFrameDescription = nullptr;
      if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK)
      {
         pFrameDescription->get_Width(&iWidth);
         pFrameDescription->get_Height(&iHeight);
      }
      pFrameDescription->Release();
      pFrameDescription = nullptr;

      // 2c. get frame reader
      cout << "Try to get color frame reader" << endl;
      if (pFrameSource->OpenReader(&pColorFrameReader) != S_OK)
      {
         cerr << "Can't get color frame reader" << endl;
         return -1;
      }

      // 2d. release Frame source
      cout << "Release frame source" << endl;
      pFrameSource->Release();
      pFrameSource = nullptr;

      // Prepare OpenCV data
      mColorImg = cv::Mat(iHeight, iWidth, CV_8UC4);
      uBufferSize = iHeight * iWidth * 4 * sizeof(BYTE);
      uColorPointNum = iHeight * iWidth;
   }
   

   // 3. Depth related code
   IDepthFrameReader* pDepthFrameReader = nullptr;
   UINT uDepthPointNum = 0;
   int iDepthWidth = 0, iDepthHeight = 0;
   cout << "Try to get depth source" << endl;
   {
      // Get frame source
      IDepthFrameSource* pFrameSource = nullptr;
      if (pSensor->get_DepthFrameSource(&pFrameSource) != S_OK)
      {
         cerr << "Can't get depth frame source" << endl;
         return -1;
      }

      // Get frame description
      cout << "get depth frame description" << endl;
      IFrameDescription* pFrameDescription = nullptr;
      if (pFrameSource->get_FrameDescription(&pFrameDescription) == S_OK)
      {
         pFrameDescription->get_Width(&iDepthWidth);
         pFrameDescription->get_Height(&iDepthHeight);
         uDepthPointNum = iDepthWidth * iDepthHeight;
      }
//......... part of the code omitted .........
Developer: berniebear, Project: inhaler, Lines: 101, Source: main.cpp

Example 13: update

void Device::update()
{
    if ( mFrameReader == 0 ) {
        return;
    }

    IAudioBeamFrame* audioFrame								= 0;
    IBodyFrame* bodyFrame									= 0;
    IBodyIndexFrame* bodyIndexFrame							= 0;
    IColorFrame* colorFrame									= 0;
    IDepthFrame* depthFrame									= 0;
    IMultiSourceFrame* frame								= 0;
    IInfraredFrame* infraredFrame							= 0;
    ILongExposureInfraredFrame* infraredLongExposureFrame	= 0;

    HRESULT hr = mFrameReader->AcquireLatestFrame( &frame );

    if ( SUCCEEDED( hr ) && mDeviceOptions.isAudioEnabled() ) {
        // TODO audio
    }

    if ( SUCCEEDED( hr ) && mDeviceOptions.isBodyEnabled() ) {
        IBodyFrameReference* frameRef = 0;
        hr = frame->get_BodyFrameReference( &frameRef );
        if ( SUCCEEDED( hr ) ) {
            hr = frameRef->AcquireFrame( &bodyFrame );
        }
        if ( frameRef != 0 ) {
            frameRef->Release();
            frameRef = 0;
        }
    }

    if ( SUCCEEDED( hr ) && mDeviceOptions.isBodyIndexEnabled() ) {
        IBodyIndexFrameReference* frameRef = 0;
        hr = frame->get_BodyIndexFrameReference( &frameRef );
        if ( SUCCEEDED( hr ) ) {
            hr = frameRef->AcquireFrame( &bodyIndexFrame );
        }
        if ( frameRef != 0 ) {
            frameRef->Release();
            frameRef = 0;
        }
    }

    if ( SUCCEEDED( hr ) && mDeviceOptions.isColorEnabled() ) {
        IColorFrameReference* frameRef = 0;
        hr = frame->get_ColorFrameReference( &frameRef );
        if ( SUCCEEDED( hr ) ) {
            hr = frameRef->AcquireFrame( &colorFrame );
        }
        if ( frameRef != 0 ) {
            frameRef->Release();
            frameRef = 0;
        }
    }

    if ( SUCCEEDED( hr ) && mDeviceOptions.isDepthEnabled() ) {
        IDepthFrameReference* frameRef = 0;
        hr = frame->get_DepthFrameReference( &frameRef );
        if ( SUCCEEDED( hr ) ) {
            hr = frameRef->AcquireFrame( &depthFrame );
        }
        if ( frameRef != 0 ) {
            frameRef->Release();
            frameRef = 0;
        }
    }

    if ( SUCCEEDED( hr ) && mDeviceOptions.isInfraredEnabled() ) {
        IInfraredFrameReference* frameRef = 0;
        hr = frame->get_InfraredFrameReference( &frameRef );
        if ( SUCCEEDED( hr ) ) {
            hr = frameRef->AcquireFrame( &infraredFrame );
        }
        if ( frameRef != 0 ) {
            frameRef->Release();
            frameRef = 0;
        }
    }

    if ( SUCCEEDED( hr ) && mDeviceOptions.isInfraredLongExposureEnabled() ) {
        ILongExposureInfraredFrameReference* frameRef = 0;
        hr = frame->get_LongExposureInfraredFrameReference( &frameRef );
        if ( SUCCEEDED( hr ) ) {
            hr = frameRef->AcquireFrame( &infraredLongExposureFrame );
        }
        if ( frameRef != 0 ) {
            frameRef->Release();
            frameRef = 0;
        }
    }

    if ( SUCCEEDED( hr ) ) {
        long long timeStamp										= 0L;

        // TODO audio

        std::vector<Body> bodies;
        int64_t bodyTime										= 0L;
//......... part of the code omitted .........
Developer: heisters, Project: Cinder-Kinect2, Lines: 101, Source: Kinect2.cpp


Note: The IDepthFrame::Release method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors. Please follow the license of the corresponding project when distributing or using the code, and do not reproduce this article without permission.