

C++ VideoStream::readFrame Method Code Examples

This article collects typical usage examples of the C++ VideoStream::readFrame method. If you are wondering exactly how VideoStream::readFrame works, how to call it, or what real uses of it look like, the hand-picked examples below should help. You can also explore further usage examples of the VideoStream class it belongs to.


The following presents 14 code examples of the VideoStream::readFrame method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
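Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: create and start a stream, wait for data, call readFrame, and check both the returned Status and the frame's validity. This sketch is illustrative only and is not taken from any of the projects below; the 2000 ms timeout and the depth-only setup are arbitrary choices.

#include <cstdio>
#include <OpenNI.h>

using namespace openni;

int main()
{
    if (OpenNI::initialize() != STATUS_OK) { return 1; }

    Device device;
    if (device.open(ANY_DEVICE) != STATUS_OK) { return 2; }

    VideoStream depth;
    if (depth.create(device, SENSOR_DEPTH) != STATUS_OK ||
        depth.start() != STATUS_OK) { return 3; }

    VideoStream* pStream = &depth;
    int changedIndex;
    // Block (up to 2000 ms) until the stream reports that a frame is ready.
    if (OpenNI::waitForAnyStream(&pStream, 1, &changedIndex, 2000) == STATUS_OK)
    {
        VideoFrameRef frame;
        if (depth.readFrame(&frame) == STATUS_OK && frame.isValid())
        {
            // Depth frames are typically 16-bit values in millimetres (PIXEL_FORMAT_DEPTH_1_MM).
            const DepthPixel* pixels = (const DepthPixel*)frame.getData();
            int middle = (frame.getHeight() / 2) * frame.getWidth() + frame.getWidth() / 2;
            printf("center depth: %d mm\n", (int)pixels[middle]);
        }
    }

    depth.stop();
    depth.destroy();
    device.close();
    OpenNI::shutdown();
    return 0;
}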

Example 1: readOpenNiColorAndDepth

int readOpenNiColorAndDepth(VideoStream &color , VideoStream &depth,VideoFrameRef &colorFrame,VideoFrameRef &depthFrame)
{
  #if USE_WAITFORANYSTREAM_TO_GRAB
   #warning "Please turn #define USE_WAITFORANYSTREAM_TO_GRAB 0"
   #warning "This is a bad idea taken from OpenNI2/Samples/SimpleViewer , we dont just want to update 'any' frame we really want to snap BOTH and do that sequentially"
   #warning "It is better to sequencially grab them instead of waiting for any stream a couple of times "
   openni::VideoStream** m_streams = new openni::VideoStream*[2];
   m_streams[0] = &depth;
   m_streams[1] = &color;


  unsigned char haveDepth=0,haveColor=0;
  int changedIndex;
  while ( (!haveDepth) || (!haveColor) )
  {
	openni::Status rc = openni::OpenNI::waitForAnyStream(m_streams, 2, &changedIndex);
	if (rc != openni::STATUS_OK)
	{
		fprintf(stderr,"Wait failed\n");
		return 0 ;
	}

	switch (changedIndex)
	{
	case 0:
		depth.readFrame(&depthFrame);
		haveDepth=1;
    break;
	case 1:
		color.readFrame(&colorFrame);
		haveColor=1;
    break;
	default:
		printf("Error in wait\n");
		return 0;
	}
  }

	delete[] m_streams; // delete[] must match the new[] above
	return 1;
  #else
    //Using serial frame grabbing
    readFrameBlocking(depth,depthFrame,MAX_TRIES_FOR_EACH_FRAME); // depth.readFrame(&depthFrame);
    readFrameBlocking(color,colorFrame,MAX_TRIES_FOR_EACH_FRAME); // color.readFrame(&colorFrame);

    if(depthFrame.isValid() && colorFrame.isValid()) { return 1; }

    fprintf(stderr,"Depth And Color frames are wrong!\n");
  #endif
    return 0;

}
Author: AmmarkoV, Project: RGBDAcquisition, Lines: 54, Source: OpenNI2Acquisition.cpp

Example 2: onNewFrame

void XtionDepthDriverImpl::onNewFrame(VideoStream &stream)
{
  VideoFrameRef ref;
  stream.readFrame(&ref);
  _lastCaptured = XtionDepthImage(ref.getData(), ref.getDataSize(),
    ref.getWidth(), ref.getHeight(), 0, this);
}
Author: Clemensius, Project: libkovan, Lines: 7, Source: xtion_depth_driver_impl_p.cpp
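Examples 2, 9, 12 and 13 all implement onNewFrame as an event callback; for context, such a listener is hooked up to a stream via OpenNI2's addNewFrameListener. A minimal sketch follows (the PrintListener class and the stream variable are hypothetical):

class PrintListener : public openni::VideoStream::NewFrameListener
{
public:
    virtual void onNewFrame(openni::VideoStream& stream)
    {
        openni::VideoFrameRef frame;
        stream.readFrame(&frame); // a frame is known to be ready inside the callback
        printf("frame %d: %dx%d\n", frame.getFrameIndex(), frame.getWidth(), frame.getHeight());
    }
};

// Usage: register after the stream has been created and started.
PrintListener listener;
stream.addNewFrameListener(&listener);
// ... onNewFrame now fires on OpenNI's internal event thread ...
stream.removeNewFrameListener(&listener);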

Example 3: onNewFrame

  virtual void onNewFrame(VideoStream& stream)
  {
    ros::Time ts = ros::Time::now();

    VideoFrameRef frame;
    stream.readFrame(&frame);

    sensor_msgs::Image::Ptr img(new sensor_msgs::Image);
    sensor_msgs::CameraInfo::Ptr info(new sensor_msgs::CameraInfo);

    double scale = double(frame.getWidth()) / double(1280);

    info->header.stamp = ts;
    info->header.frame_id = frame_id_;
    info->width = frame.getWidth();
    info->height = frame.getHeight();
    info->K.assign(0);
    info->K[0] = 1050.0 * scale;
    info->K[4] = 1050.0 * scale;
    info->K[2] = frame.getWidth() / 2.0 - 0.5;
    info->K[5] = frame.getHeight() / 2.0 - 0.5;
    info->P.assign(0);
    info->P[0] = 1050.0 * scale;
    info->P[5] = 1050.0 * scale;
    info->P[2] = frame.getWidth() / 2.0 - 0.5;
    info->P[6] = frame.getHeight() / 2.0 - 0.5;

    switch(frame.getVideoMode().getPixelFormat())
    {
    case PIXEL_FORMAT_GRAY8:
      img->encoding = sensor_msgs::image_encodings::MONO8;
      break;
    case PIXEL_FORMAT_GRAY16:
      img->encoding = sensor_msgs::image_encodings::MONO16;
      break;
    case PIXEL_FORMAT_YUV422:
      img->encoding = sensor_msgs::image_encodings::YUV422;
      break;
    case PIXEL_FORMAT_RGB888:
      img->encoding = sensor_msgs::image_encodings::RGB8;
      break;
    case PIXEL_FORMAT_SHIFT_9_2:
    case PIXEL_FORMAT_DEPTH_1_MM:
      img->encoding = sensor_msgs::image_encodings::TYPE_16UC1;
      break;
    default:
      ROS_WARN("Unknown OpenNI pixel format!");
      break;
    }
    img->header.stamp = ts;
    img->header.frame_id = frame_id_;
    img->height = frame.getHeight();
    img->width = frame.getWidth();
    img->step = frame.getStrideInBytes();
    img->data.resize(frame.getDataSize());
    std::copy(static_cast<const uint8_t*>(frame.getData()), static_cast<const uint8_t*>(frame.getData()) + frame.getDataSize(), img->data.begin());

    publish(img, info);
  }
Author: amiltonwong, Project: openni2_camera, Lines: 59, Source: camera.cpp
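Note that this example hard-codes a focal length of 1050.0 pixels for a 1280-wide image and scales it to the actual resolution. An alternative, used in Example 7 below, is to derive the intrinsics from the field of view the stream itself reports; a brief sketch (assuming <cmath> and the frame/stream objects from the callback):

// Focal lengths in pixels from the stream's field of view (reported in radians):
double fx = frame.getWidth()  / (2.0 * tan(stream.getHorizontalFieldOfView() / 2.0));
double fy = frame.getHeight() / (2.0 * tan(stream.getVerticalFieldOfView()  / 2.0));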

Example 4: readFrameBlocking

int readFrameBlocking(VideoStream &stream, VideoFrameRef &frame, unsigned int max_tries)
{
  unsigned int tries_for_frame = 0;
  while (tries_for_frame < max_tries)
  {
    stream.readFrame(&frame);
    if (frame.isValid()) { return 1; }
    ++tries_for_frame;
  }

  // Reaching this point means every attempt yielded an invalid frame.
  fprintf(stderr, "Could not get a valid frame even after %u tries\n", max_tries);
  return 0;
}
Author: AmmarkoV, Project: RGBDAcquisition, Lines: 13, Source: OpenNI2Acquisition.cpp

Example 5: Size

int
main (int argc, char** argv)
{
    Status rc = OpenNI::initialize();
    if (rc != STATUS_OK)
    {
        std::cout << "Initialize failed: " << OpenNI::getExtendedError() << std::endl;
        return 1;
    }

    Device device;
    rc = device.open(ANY_DEVICE);
    if (rc != STATUS_OK)
    {
        std::cout << "Couldn't open device: " << OpenNI::getExtendedError() << std::endl;
        return 2;
    }

    VideoStream stream;
    if (device.getSensorInfo(currentSensor) != NULL)
    {
        rc = stream.create(device, currentSensor);
        if (rc != STATUS_OK)
        {
            std::cout << "Couldn't create stream: " << OpenNI::getExtendedError() << std::endl;
            return 3;
        }
    }

    rc = stream.start();
    if (rc != STATUS_OK)
    {
        std::cout << "Couldn't start the stream: " << OpenNI::getExtendedError() << std::endl;
        return 4;
    }

    VideoFrameRef frame;

    //now open the video writer
    Size S = Size(stream.getVideoMode().getResolutionX(),
                  stream.getVideoMode().getResolutionY());
    VideoWriter outputVideo;
    std::string fileName = "out.avi";
    outputVideo.open(fileName, -1, stream.getVideoMode().getFps(), S, currentSensor == SENSOR_COLOR ? true : false);
    if (!outputVideo.isOpened())
    {
        std::cout  << "Could not open the output video for write: " << fileName << std::endl;
        return -1;
    }

    while (waitKey(50) == -1)
    {
        int changedStreamDummy;
        VideoStream* pStream = &stream;
        rc = OpenNI::waitForAnyStream(&pStream, 1, &changedStreamDummy, SAMPLE_READ_WAIT_TIMEOUT);
        if (rc != STATUS_OK)
        {
            std::cout << "Wait failed! (timeout is " << SAMPLE_READ_WAIT_TIMEOUT << "ms): " << OpenNI::getExtendedError() << std::endl;
            continue;
        }

        rc = stream.readFrame(&frame);
        if (rc != STATUS_OK)
        {
            std::cout << "Read failed:" << OpenNI::getExtendedError() << std::endl;
            continue;
        }

        Mat image;
        switch (currentSensor)
        {
        case SENSOR_COLOR:
            image = Mat(frame.getHeight(), frame.getWidth(), CV_8UC3, (void*)frame.getData());
            break;
        case SENSOR_DEPTH:
            image = Mat(frame.getHeight(), frame.getWidth(), DataType<DepthPixel>::type, (void*)frame.getData());
            break;
        case SENSOR_IR:
            image = Mat(frame.getHeight(), frame.getWidth(), CV_8U, (void*)frame.getData());
            break;
        default:
            break;
        }

        namedWindow( "Display window", WINDOW_AUTOSIZE );    // Create a window for display.
        imshow( "Display window", image );                   // Show our image inside it.

        outputVideo << image;
    }

    stream.stop();
    stream.destroy();
    device.close();
    OpenNI::shutdown();

    return 0;
}
Author: JuliusGel, Project: CameraTests, Lines: 97, Source: OpenNIRecorder.cpp
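A note on the VideoWriter call above: passing -1 as the FourCC makes OpenCV pop up a codec-selection dialog (Windows only), so the codec choice is interactive rather than fixed. To select a codec explicitly, a FourCC can be passed instead, e.g. with the same CV_FOURCC macro that Example 6 uses; MJPG here is just one common choice, not a requirement:

outputVideo.open(fileName, CV_FOURCC('M', 'J', 'P', 'G'), stream.getVideoMode().getFps(), S, currentSensor == SENSOR_COLOR);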

Example 6: main


//......... (part of the code omitted here) .........
	rc = depth.start();
	if (rc != STATUS_OK)
	{
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 5;
	}
	rc = color.start();
	if (rc != STATUS_OK){
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 6;
	}
	
	VideoFrameRef colorframe;
	VideoFrameRef depthframe;
	int iMaxDepth = depth.getMaxPixelValue();
	int iColorFps = color.getVideoMode().getFps();
	cv::Size iColorFrameSize = cv::Size(color.getVideoMode().getResolutionX(), color.getVideoMode().getResolutionY());

	cv::Mat colorimageRGB;
	cv::Mat colorimageBGR;
	cv::Mat depthimage;
	cv::Mat depthimageScaled;

#ifdef F_RECORDVIDEO

	cv::VideoWriter outputvideo_color;
	cv::FileStorage outputfile_depth;
	
	time_t timenow = time(0);
	tm ltime;
	localtime_s(&ltime, &timenow);
	int tyear = 1900 + ltime.tm_year;
	int tmonth = 1 + ltime.tm_mon;
	int tday = ltime.tm_mday;
	int thour = ltime.tm_hour;
	int tmin = ltime.tm_min;
	int tsecond = ltime.tm_sec;

	string filename_rgb = "RGB/rgb_" + to_string(tyear) + "_" + to_string(tmonth) + "_" + to_string(tday)
		+ "_" + to_string(thour) + "_" + to_string(tmin) + "_" + to_string(tsecond) + ".avi";
	string filename_d = "D/d_" + to_string(tyear) + "_" + to_string(tmonth) + "_" + to_string(tday)
		+ "_" + to_string(thour) + "_" + to_string(tmin) + "_" + to_string(tsecond) + ".yml";

	outputvideo_color.open(filename_rgb, CV_FOURCC('I', '4', '2', '0'), iColorFps, iColorFrameSize, true);
	if (!outputvideo_color.isOpened()){
		cout << "Could not open the output color video for write: " << endl;
		return 7;
	}
	outputfile_depth.open(filename_d, cv::FileStorage::WRITE);
	if (!outputfile_depth.isOpened()){
		cout << "Could not open the output depth file for write: " << endl;
		return 8;
	}

#endif // F_RECORDVIDEO


	// 7. main loop, continue read
	while (!wasKeyboardHit())
	{
		// 8. check if the color stream is available
		if (color.isValid()){
			if (color.readFrame(&colorframe) == STATUS_OK){
				colorimageRGB = { colorframe.getHeight(), colorframe.getWidth(), CV_8UC3, (void*)colorframe.getData() };
				cv::cvtColor(colorimageRGB, colorimageBGR, CV_RGB2BGR);
			}
		}

		// 9. check if the depth stream is available
		if (depth.isValid()){
			if (depth.readFrame(&depthframe) == STATUS_OK){
				depthimage = { depthframe.getHeight(), depthframe.getWidth(), CV_16UC1, (void*)depthframe.getData() };
				depthimage.convertTo(depthimageScaled, CV_8U, 255.0 / iMaxDepth);
			}
		}

		cv::imshow("Color Image", colorimageBGR);
		cv::imshow("Depth Image", depthimageScaled);

#ifdef F_RECORDVIDEO
		
		outputvideo_color << colorimageBGR;
		outputfile_depth << "Mat" << depthimage;

#endif // F_RECORDVIDEO

		cv::waitKey(10);
	}

	color.stop();
	depth.stop();

	color.destroy();
	depth.destroy();

	device.close();
	OpenNI::shutdown();

	return 0;
}
Author: sc1991327, Project: testGroundPlane, Lines: 101, Source: main.cpp

Example 7: camera_thread

void* camera_thread(void*){
  VideoFrameRef frame;
  VideoFrameRef rgbframe;
  int changed_stream;
  VideoStream* pStream;
  VideoStream* pRgbStream;
  sensor_msgs::CameraInfo rgb_info;
  sensor_msgs::CameraInfo depth_info;
  sensor_msgs::Image image;
  image.header.frame_id=frame_id;
  image.is_bigendian=0;

  int count = 0;
  int num_frames_stat=64;
  struct timeval previous_time;
  gettimeofday(&previous_time, 0);
  uint64_t last_stamp = 0;
  while(run){
    bool new_frame = false;
    uint64_t current_stamp = 0;
    openni::OpenNI::waitForAnyStream(streams, 2, &changed_stream);
    switch (changed_stream)
      {
	//DEPTH
      case 0:
	depth.readFrame(&frame);
	depth_info.header.stamp=ros::Time::now();
	image.header.stamp=ros::Time::now();
	if(!frame.isValid()) break;
	current_stamp=frame.getTimestamp();

	if (current_stamp-last_stamp>5000){
	  count++;
	  new_frame = true;
	}
	last_stamp = current_stamp;

	if (! (count % (_frame_skip)) ){
	  image.header.seq = count;
	  if (! _registration){
	    image.header.frame_id=frame_id+"_depth";
	  } else 
	    image.header.frame_id=frame_id+"_rgb";

	  depth_info.width=frame.getWidth();
	  depth_info.height=frame.getHeight();

	  depth_info.header.seq = count;

    depth_info.distortion_model = sensor_msgs::distortion_models::PLUMB_BOB;
    depth_info.D.resize(5, 0.0);

	  depth_info.K[0]=depth_info.width/(2*tan(depth.getHorizontalFieldOfView()/2)); //fx
	  depth_info.K[4]=depth_info.height/(2*tan(depth.getVerticalFieldOfView()/2)); //fy
	  depth_info.K[2]=depth_info.width/2; //cx
	  depth_info.K[5]=depth_info.height/2; //cy
	  depth_info.K[8]=1;

	  depth_info.header.frame_id=image.header.frame_id;
	  image.height=frame.getHeight();
	  image.width=frame.getWidth();
	  image.encoding="mono16";
	  image.step=frame.getWidth()*2;
	  image.data.resize(image.step*image.height);
	  memcpy((char*)(&image.data[0]),frame.getData(),image.height*image.width*2);
	  pub_depth.publish(image);
	  pub_camera_info_depth.publish(depth_info);
	}
	break;
	//RGB
      case 1:
	rgb.readFrame(&rgbframe);
	image.header.stamp=ros::Time::now();
	rgb_info.header.stamp=ros::Time::now();
	if(!rgbframe.isValid()) break;
	current_stamp=rgbframe.getTimestamp();

	if (current_stamp-last_stamp>5000){
	  count++;
	  new_frame = true;
	}
	last_stamp = current_stamp;

	if (_gain>=0) {
	  if (count > 64 && gain_status==0){
	    rgb.getCameraSettings()->setAutoExposureEnabled(false);
	    rgb.getCameraSettings()->setAutoWhiteBalanceEnabled(false);
	    rgb.getCameraSettings()->setExposure(1);
	    rgb.getCameraSettings()->setGain(100);
	    gain_status=1;
	  } else if (count > 128 && gain_status==1){
	    rgb.getCameraSettings()->setExposure(_exposure);
	    rgb.getCameraSettings()->setGain(_gain);
	    gain_status=2;
	  }
	}

	if (! (count%(_frame_skip)) ){
	  rgb_info.header.frame_id=frame_id+"_rgb";
	  rgb_info.width=rgbframe.getWidth();
//......... (part of the code omitted here) .........
Author: mauriliodc, Project: thin_astra, Lines: 101, Source: thin_astra_node.cpp

Example 8: _tmain

int _tmain(int argc, _TCHAR* argv[])
{
	DepthDetector detector(ThresholdMin, ThresholdMax);
	ScanLineSegmenter segmenter;

	OpenNI::initialize();

	Device device;
	if (device.open(ANY_DEVICE) != STATUS_OK)
	{
		std::cout << "could not open any device\r\n";
		return 1;
	}

	if (device.hasSensor(SENSOR_DEPTH))
	{
		auto info = device.getSensorInfo(SENSOR_DEPTH);
		auto& modes = info->getSupportedVideoModes();
		std::cout << "depth sensor supported modes:\r\n";
		for (int i = 0; i < modes.getSize(); ++i)
		{
			auto& mode = modes[i];
			std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
		}
	}

	VideoStream stream;
	stream.create(device, SENSOR_DEPTH);
	VideoMode mode;
	mode.setFps(25);
	mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
	mode.setResolution(320, 240);
	stream.setMirroringEnabled(true);
	stream.setVideoMode(mode);
	stream.start();

	std::cout << "press any key to capture background\r\n";
	std::cin.get();

	VideoFrameRef frame;
	stream.readFrame(&frame);

	DepthImage image(320, 240);
	copyFrameToImage(frame, image);

	detector.background(image);

	std::cout << "starting capture loop\r\n";

	CenterPointExtractor centerPointExtractor(MinBlobSize);
	std::chrono::high_resolution_clock timer;
	auto startTime = timer.now();
	int frameId = 0;
	while (true)
	{
		stream.readFrame(&frame);

		copyFrameToImage(frame, image);

		detector.detect(image);

		std::vector<LineSegment> segments;
		segmenter.segment(detector.mask(), segments);


		std::vector<std::pair<float, float>> centerPoints;
		centerPointExtractor.extract(segments, centerPoints);

		if (centerPoints.size())
		{
			std::cout << "point count: " << centerPoints.size();
		
			std::cout << "\t points: ";
		
			for (auto& point : centerPoints)
			{
				std::cout << "(" << point.first << ", " << point.second << ")  ";
			}
			std::cout << "\r\n";
		}

		++frameId;

		
		
		if (frameId % 64 == 0)
		{
			auto stopTime = timer.now();
			
			auto elapsedTime = stopTime - startTime;
			auto elapsedMilliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(elapsedTime).count();

			std::cout << "\t total frames: " << frameId << "\t fps: " << elapsedMilliseconds / 64 << std::endl;

			startTime = stopTime;
		}
		
	}
	
	openni::OpenNI::shutdown();
//......... (part of the code omitted here) .........
Author: patrigg, Project: TouchTable, Lines: 101, Source: TouchDebugOutput.cpp

Example 9: onNewFrame

void DepthCallback::onNewFrame(VideoStream& stream)
{
    stream.readFrame(&m_frame);

    analyzeFrame(m_frame);
}
Author: RaresAmbrus, Project: scitos_robot, Lines: 6, Source: DepthCallback.cpp

Example 10: _tmain

int _tmain(int argc, _TCHAR* argv[])
{
	sdl::Application app;

	DepthDetector detector(ThresholdMin, ThresholdMax);
	ScanLineSegmenter segmenter;

	OpenNI::initialize();

	Device device;
	if (device.open(ANY_DEVICE) != STATUS_OK)
	{
		std::cout << "could not open any device\r\n";
		return 1;
	}

	if (device.hasSensor(SENSOR_DEPTH))
	{
		auto info = device.getSensorInfo(SENSOR_DEPTH);
		auto& modes = info->getSupportedVideoModes();
		std::cout << "depth sensor supported modes:\r\n";
		for (int i = 0; i < modes.getSize(); ++i)
		{
			auto& mode = modes[i];
			std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
		}
	}

	VideoStream stream;
	stream.create(device, SENSOR_DEPTH);
	VideoMode mode;
	mode.setFps(25);
	mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
	mode.setResolution(320, 240);
	stream.setMirroringEnabled(true);
	stream.setVideoMode(mode);
	stream.start();

	std::cout << "press any key to capture background\r\n";
	std::cin.get();

	VideoFrameRef frame;
	stream.readFrame(&frame);

	DepthImage image(320, 240);
	copyFrameToImage(frame, image);

	detector.background(image);

	std::cout << "starting capture loop\r\n";

	sdl::GLContext::setVersion(4, 3);

	ImageViewer viewer;
	viewer.add(0, 0, 320, 240);
	viewer.add(320, 0, 320, 240);
	viewer.add(0, 240, 320, 240);
	viewer.add(320, 240, 320, 240);
	
	CenterPointExtractor centerPointExtractor(MinBlobSize);
	MotionRecorder recorder;

	while (true)
	{
		stream.readFrame(&frame);

		copyFrameToImage(frame, image);
		
		detector.detect(image);

		std::vector<LineSegment> segments;
		segmenter.segment(detector.mask(), segments);


		std::vector<std::pair<float, float>> centerPoints;
		centerPointExtractor.extract(segments, centerPoints);

		recorder.track(centerPoints);
		
		viewer.crosses.clear();
		std::transform(begin(centerPoints), end(centerPoints), std::back_inserter(viewer.crosses), [](std::pair<float, float>& coord) {
			return Cross{ coord.first, coord.second };
		});

		viewer.lines.clear();
		std::transform(begin(recorder.motions()), end(recorder.motions()), std::back_inserter(viewer.lines), [](const Motion& motion) {
			return Lines{ motion.points };
		});
		
		viewer[0].update(detector.mask());
		viewer[1].update(image);
		viewer[2].update(detector.background());
		viewer[3].update(detector.difference());
		
		viewer.update();
	}
	
	openni::OpenNI::shutdown();
	return 0;
}
Author: patrigg, Project: TouchTable, Lines: 100, Source: TouchVisualizer.cpp

Example 11: main


//......... (part of the code omitted here) .........
	VideoStream streamInitDepth;
    streamInitDepth.create( devAnyDevice, SENSOR_DEPTH );

	VideoStream streamInitColor;
    streamInitColor.create( devAnyDevice, SENSOR_COLOR );

	streamInitDepth.setVideoMode( mModeDepth );
	streamInitColor.setVideoMode( mModeColor );

	namedWindow( "Depth Image (Init)",  CV_WINDOW_AUTOSIZE );
    namedWindow( "Color Image (Init)",  CV_WINDOW_AUTOSIZE );
	//namedWindow( "Thresholded Image (Init)", CV_WINDOW_AUTOSIZE );

	VideoFrameRef  frameDepthInit;
    VideoFrameRef  frameColorInit;

	streamInitDepth.start();
	streamInitColor.start();
	cv::Mat BackgroundFrame;

	int avgDist = 0;
	int iMaxDepthInit = streamInitDepth.getMaxPixelValue();
	
	OutX.clear();
	OutY.clear();

	vector<int> OldOutX, OldOutY;
	OldOutX.clear();
	OldOutY.clear();
	//------------------------------------------------------------
	//--------------------[Initiation Process]--------------------
	while( true )
	{
		streamInitDepth.readFrame( &frameDepthInit );
		streamInitColor.readFrame( &frameColorInit );

		const cv::Mat mImageDepth( frameDepthInit.getHeight(), frameDepthInit.getWidth(), CV_16UC1, (void*)frameDepthInit.getData());

        cv::Mat mScaledDepth;
        mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepthInit );

        cv::imshow( "Depth Image (Init)", mScaledDepth );

        const cv::Mat mImageRGB(frameColorInit.getHeight(), frameColorInit.getWidth(), CV_8UC3, (void*)frameColorInit.getData());

        cv::Mat cImageBGR;
        cv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );

		//--------------------[Get Average Distance]---------------------
		int depthVal = 0;
		int frameHeight = frameDepthInit.getHeight();
		int frameWidth = frameDepthInit.getWidth();
		//------------
		//backgroundDepth.resize(frameHeight * frameWidth);
		//---------------------------------------------------------------
		
		int initCount = 0;
		for(int i = 0; i < frameHeight; i++)
		{
			for(int j = 0; j < frameWidth; j++)
			{
				depthVal = mImageDepth.at<unsigned short>(i, j) + depthVal;
				initCount++;
			}
		}
		avgDist = depthVal / ((frameHeight) * (frameWidth));
Author: AlDCheng, Project: Osus, Lines: 67, Source: KinectDepthSVM.cpp

Example 12: onNewFrame

	void onNewFrame(VideoStream& stream)
	{
		stream.readFrame(&m_frame);

		analyzeFrame(m_frame);
	}
Author: MetaMagic, Project: OpenNI2, Lines: 6, Source: main.cpp

Example 13: onNewFrame

void DepthSource::onNewFrame(VideoStream& stream)
{
    Status status = stream.readFrame(&videoFrameRef);
    if (status != STATUS_OK)
    {
        ofLogError() << "DepthSource readFrame FAIL: " <<  OpenNI::getExtendedError();
    }
    const DepthPixel* oniDepthPixels = (const DepthPixel*)videoFrameRef.getData();
    
    if (doRawDepth)
    {
        if (doDoubleBuffering)
        {
            backRawPixels->setFromPixels(oniDepthPixels, width, height, 1);
            swap(backRawPixels,currentRawPixels);
        }else
        {
            currentRawPixels->setFromPixels(oniDepthPixels, width, height, 1);
        }
    }
    
    ofPixels* pixelBuffer = currentPixels;
    if (doDoubleBuffering)
    {
        pixelBuffer = backPixels;
    }
    
    float max = 255;
    unsigned char nearColor=255;
    unsigned char farColor=20;
    for (unsigned short y = 0; y < height; y++)
    {
        unsigned char * pixel = pixelBuffer->getPixels() + y * width * 4;
        for (unsigned short  x = 0; x < width; x++, oniDepthPixels++, pixel += 4)
        {
            unsigned char red = 0;
            unsigned char green = 0;
            unsigned char blue = 0;
            unsigned char alpha = 255;
            
            if ( (*oniDepthPixels > nearClipping) && (*oniDepthPixels < farClipping) ){
                // map the raw depth into the near/far gray range
                // (an alternative linear scaling: (unsigned char)((*oniDepthPixels) / (deviceMaxDepth / max)))
                unsigned char a = ofMap(*oniDepthPixels, nearClipping, farClipping, nearColor, farColor, true);
                red		= a;
                green	= a;
                blue	= a;
                pixel[0] = red;
                pixel[1] = green;
                pixel[2] = blue;
                
                if (*oniDepthPixels == 0)
                {
                    pixel[3] = 0;
                } else
                {
                    pixel[3] = alpha;
                }
            }else{
                pixel[0] = 0;
                pixel[1] = 0;
                pixel[2] = 0;
                pixel[3] = 255;
            }
            
            
        }
    }
    if (doDoubleBuffering)
    {
        swap(backPixels,currentPixels);
    }
    

}
Author: serman, Project: ofxOpenNI2Grabber, Lines: 77, Source: DepthSource.cpp

Example 14: main

int main()
{
	// 2. initialize OpenNI
	Status rc = OpenNI::initialize();
	if (rc != STATUS_OK)
	{
		printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
		return 1;
	}

	// 3. open a device
	Device device;
	rc = device.open(ANY_DEVICE);
	if (rc != STATUS_OK)
	{
		printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
		return 2;
	}

	// 4. create depth stream
	VideoStream depth;
	if (device.getSensorInfo(SENSOR_DEPTH) != NULL){
		rc = depth.create(device, SENSOR_DEPTH);
		if (rc != STATUS_OK){
			printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
			return 3;
		}
	}
	VideoStream color;
	if (device.getSensorInfo(SENSOR_COLOR) != NULL){
		rc = color.create(device, SENSOR_COLOR);
		if (rc != STATUS_OK){
			printf("Couldn't create color stream\n%s\n", OpenNI::getExtendedError());
			return 4;
		}
	}

	// 5. create OpenCV Window
	cv::namedWindow("Depth Image", CV_WINDOW_AUTOSIZE);
	cv::namedWindow("Color Image", CV_WINDOW_AUTOSIZE);

	// 6. start
	rc = depth.start();
	if (rc != STATUS_OK)
	{
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 5;
	}
	rc = color.start();
	if (rc != STATUS_OK){
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 6;
	}
	VideoFrameRef colorframe;
	VideoFrameRef depthframe;
	int iMaxDepth = depth.getMaxPixelValue();

	cv::Mat colorimageRGB;
	cv::Mat colorimageBGR;
	cv::Mat depthimage;
	cv::Mat depthimageScaled;

	// 7. main loop, continue read
	while (!wasKeyboardHit())
	{
		// 8. check if the color stream is available
		if (color.isValid()){
			if (color.readFrame(&colorframe) == STATUS_OK){
				colorimageRGB = { colorframe.getHeight(), colorframe.getWidth(), CV_8UC3, (void*)colorframe.getData() };
				cv::cvtColor(colorimageRGB, colorimageBGR, CV_RGB2BGR);
			}
		}

		// 9. check if the depth stream is available
		if (depth.isValid()){
			if (depth.readFrame(&depthframe) == STATUS_OK){
				depthimage = { depthframe.getHeight(), depthframe.getWidth(), CV_16UC1, (void*)depthframe.getData() };
				depthimage.convertTo(depthimageScaled, CV_8U, 255.0 / iMaxDepth);
			}
		}

		cv::imshow("Color Image", colorimageBGR);
		cv::imshow("Depth Image", depthimageScaled);
		cv::waitKey(10);
	}

	color.stop();
	depth.stop();

	color.destroy();
	depth.destroy();

	device.close();
	OpenNI::shutdown();

	return 0;
}
Author: sc1991327, Project: testOpenNI, Lines: 97, Source: main.cpp
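Example 14 displays the depth and color images independently, so their pixels are not aligned. If registered depth is needed (compare the _registration flag in Example 7), OpenNI2 can map the depth stream into the color camera's frame. A short sketch, to be run after opening the device and before starting the streams:

// Align depth pixels to the color camera, if the device supports it.
if (device.isImageRegistrationModeSupported(IMAGE_REGISTRATION_DEPTH_TO_COLOR))
{
    device.setImageRegistrationMode(IMAGE_REGISTRATION_DEPTH_TO_COLOR);
}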


Note: The VideoStream::readFrame method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.