

C++ VideoMode::setResolution Method Code Examples

This article collects typical usage examples of the C++ VideoMode::setResolution method. If you are wondering what VideoMode::setResolution does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the VideoMode class it belongs to.


The following presents 9 code examples of the VideoMode::setResolution method, sorted by popularity by default.
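Before turning to the examples, here is a minimal, self-contained sketch of the typical call pattern. This is an illustration written for this article rather than code from any of the projects below; it assumes the OpenNI2 header <OpenNI.h> and a connected depth sensor. A VideoMode is a plain settings object: setResolution(x, y) takes effect only once the mode is applied to a stream via VideoStream::setVideoMode, which the examples below generally do before starting the stream (or after stopping it).

#include <OpenNI.h>
#include <cstdio>

int main()
{
    using namespace openni;

    if (OpenNI::initialize() != STATUS_OK) return 1;

    Device device;
    if (device.open(ANY_DEVICE) != STATUS_OK) return 1;

    VideoStream depth;
    if (depth.create(device, SENSOR_DEPTH) != STATUS_OK) return 1;

    // Describe the desired mode, then apply it before starting the stream.
    VideoMode mode;
    mode.setResolution(320, 240);                 // width x height in pixels
    mode.setFps(30);
    mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);

    if (depth.setVideoMode(mode) != STATUS_OK)
        std::printf("setVideoMode failed: %s\n", OpenNI::getExtendedError());

    depth.start();
    // ... read frames with depth.readFrame(...) ...
    depth.stop();

    OpenNI::shutdown();
    return 0;
}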

Example 1: setDepthCameraResolution

void XtionDepthDriverImpl::setDepthCameraResolution(const DepthResolution resolution)
{
  _stream.stop();

  VideoMode mode = _stream.getVideoMode();

  if(resolution == DEPTH_RESOLUTION_320_240) {
    mode.setResolution(320, 240);
  } else if(resolution == DEPTH_RESOLUTION_640_480) {
    mode.setResolution(640, 480);
  } else {
    _stream.start();
    throw Exception("Invalid resolution");
  }

  Status rc = _stream.setVideoMode(mode);
  if (rc != STATUS_OK) {
    _stream.start();
    throw Exception(std::string("Set the resolution failed with\n")
      + OpenNI::getExtendedError());
  }
  
  rc = _stream.start();
  if (rc != STATUS_OK) {
    // The stream could not be restarted, so there is no state left to
    // recover; close the device before reporting the failure.
    close();

    throw Exception("Unable to start the depth stream");
  }
}
Developer ID: Clemensius, Project: libkovan, Lines of code: 30, Source file: xtion_depth_driver_impl_p.cpp

Example 2: initializeVideo

void TouchTracking::initializeVideo()
{
    OpenNI::initialize();

    if (device.open(ANY_DEVICE) != STATUS_OK)
    {
        throw std::runtime_error("could not open any device!");
    }

    if (!device.hasSensor(SENSOR_DEPTH))
    {
        throw std::runtime_error("sensor cannot receive depth!");
    }

    auto info = device.getSensorInfo(SENSOR_DEPTH);
    auto& modes = info->getSupportedVideoModes();
    //std::cout << "depth sensor supported modes:\r\n";
    for (int i = 0; i < modes.getSize(); ++i)
    {
        m_videoModes.push_back(modes[i]);
        //std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
    }

    VideoMode mode;
    mode.setFps(60);
    mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
    mode.setResolution(320, 240);
    videoMode(mode);

    stream.setMirroringEnabled(false);
}
Developer ID: patrigg, Project: TouchTable, Lines of code: 31, Source file: TouchTracking.cpp

Example 3: _tmain

int _tmain(int argc, _TCHAR* argv[])
{
	DepthDetector detector(ThresholdMin, ThresholdMax);
	ScanLineSegmenter segmenter;

	OpenNI::initialize();

	Device device;
	if (device.open(ANY_DEVICE) != STATUS_OK)
	{
		std::cout << "could not open any device\r\n";
		return 1;
	}

	if (device.hasSensor(SENSOR_DEPTH))
	{
		auto info = device.getSensorInfo(SENSOR_DEPTH);
		auto& modes = info->getSupportedVideoModes();
		std::cout << "depth sensor supported modes:\r\n";
		for (int i = 0; i < modes.getSize(); ++i)
		{
			auto& mode = modes[i];
			std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
		}
	}

	VideoStream stream;
	stream.create(device, SENSOR_DEPTH);
	VideoMode mode;
	mode.setFps(25);
	mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
	mode.setResolution(320, 240);
	stream.setMirroringEnabled(true);
	stream.setVideoMode(mode);
	stream.start();

	std::cout << "press any key to capture background\r\n";
	std::cin.get();

	VideoFrameRef frame;
	stream.readFrame(&frame);

	DepthImage image(320, 240);
	copyFrameToImage(frame, image);

	detector.background(image);

	std::cout << "starting capture loop\r\n";

	CenterPointExtractor centerPointExtractor(MinBlobSize);
	std::chrono::high_resolution_clock timer;
	auto startTime = timer.now();
	int frameId = 0;
	while (true)
	{
		stream.readFrame(&frame);

		copyFrameToImage(frame, image);

		detector.detect(image);

		std::vector<LineSegment> segments;
		segmenter.segment(detector.mask(), segments);


		std::vector<std::pair<float, float>> centerPoints;
		centerPointExtractor.extract(segments, centerPoints);

		if (centerPoints.size())
		{
			std::cout << "point count: " << centerPoints.size();
		
			std::cout << "\t points: ";
		
			for (auto& point : centerPoints)
			{
				std::cout << "(" << point.first << ", " << point.second << ")  ";
			}
			std::cout << "\r\n";
		}

		++frameId;

		if (frameId % 64 == 0)
		{
			auto stopTime = timer.now();
			auto elapsedTime = stopTime - startTime;
			auto elapsedMilliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(elapsedTime).count();

			// 64 frames elapsed since the last report; frames divided by
			// seconds gives the frame rate (the original printed ms/frame).
			std::cout << "\t total frames: " << frameId << "\t fps: " << (64.0 * 1000.0) / elapsedMilliseconds << std::endl;

			startTime = stopTime;
		}
		
	}
	
	openni::OpenNI::shutdown();
//......... part of the code omitted .........
Developer ID: patrigg, Project: TouchTable, Lines of code: 101, Source file: TouchDebugOutput.cpp

Example 4: _tmain

int _tmain(int argc, _TCHAR* argv[])
{
	sdl::Application app;

	DepthDetector detector(ThresholdMin, ThresholdMax);
	ScanLineSegmenter segmenter;

	OpenNI::initialize();

	Device device;
	if (device.open(ANY_DEVICE) != STATUS_OK)
	{
		std::cout << "could not open any device\r\n";
		return 1;
	}

	if (device.hasSensor(SENSOR_DEPTH))
	{
		auto info = device.getSensorInfo(SENSOR_DEPTH);
		auto& modes = info->getSupportedVideoModes();
		std::cout << "depth sensor supported modes:\r\n";
		for (int i = 0; i < modes.getSize(); ++i)
		{
			auto& mode = modes[i];
			std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
		}
	}

	VideoStream stream;
	stream.create(device, SENSOR_DEPTH);
	VideoMode mode;
	mode.setFps(25);
	mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
	mode.setResolution(320, 240);
	stream.setMirroringEnabled(true);
	stream.setVideoMode(mode);
	stream.start();

	std::cout << "press any key to capture background\r\n";
	std::cin.get();

	VideoFrameRef frame;
	stream.readFrame(&frame);

	DepthImage image(320, 240);
	copyFrameToImage(frame, image);

	detector.background(image);

	std::cout << "starting capture loop\r\n";

	sdl::GLContext::setVersion(4, 3);

	ImageViewer viewer;
	viewer.add(0, 0, 320, 240);
	viewer.add(320, 0, 320, 240);
	viewer.add(0, 240, 320, 240);
	viewer.add(320, 240, 320, 240);
	
	CenterPointExtractor centerPointExtractor(MinBlobSize);
	MotionRecorder recorder;

	while (true)
	{
		stream.readFrame(&frame);

		copyFrameToImage(frame, image);
		
		detector.detect(image);

		std::vector<LineSegment> segments;
		segmenter.segment(detector.mask(), segments);


		std::vector<std::pair<float, float>> centerPoints;
		centerPointExtractor.extract(segments, centerPoints);

		recorder.track(centerPoints);
		
		viewer.crosses.clear();
		std::transform(begin(centerPoints), end(centerPoints), std::back_inserter(viewer.crosses), [](std::pair<float, float>& coord) {
			return Cross{ coord.first, coord.second };
		});

		viewer.lines.clear();
		std::transform(begin(recorder.motions()), end(recorder.motions()), std::back_inserter(viewer.lines), [](const Motion& motion) {
			return Lines{ motion.points };
		});
		
		viewer[0].update(detector.mask());
		viewer[1].update(image);
		viewer[2].update(detector.background());
		viewer[3].update(detector.difference());
		
		viewer.update();
	}
	
	openni::OpenNI::shutdown();
	return 0;
}
Developer ID: patrigg, Project: TouchTable, Lines of code: 100, Source file: TouchVisualizer.cpp

Example 5: main


//......... part of the code omitted .........
		trainingData[i][0] = svmE[i+1];
		trainingData[i][1] = svmC[i+1];
		trainingData[i][2] = svmA[i+1];
		trainingData[i][3] = svmP[i+1];
	}
    Mat trainingDataMat(1000, 4, CV_32FC1, trainingData);

    // Set up SVM's parameters
    CvSVMParams params;
	params = SVMFinger.get_params();
    //params.svm_type    = CvSVM::C_SVC;
    //params.kernel_type = CvSVM::LINEAR;
    //params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);

    // Train the SVM
    SVMFinger.train_auto(trainingDataMat, labelsMat, Mat(), Mat(), params);

//	Mat sampleMat = (Mat_<float>(1,2) << 138.5, 57);
//	float response = SVMFinger.predict(sampleMat);

	waitKey();
	destroyWindow("Image");
	destroyWindow("Image2");

	//------------------------------------------
	OpenNI::initialize();

	Device devAnyDevice;
    devAnyDevice.open(ANY_DEVICE);

	//----------------[Define Video Settings]-------------------
	//Set Properties of Depth Stream
	VideoMode mModeDepth;
	mModeDepth.setResolution( 640, 480 );
	mModeDepth.setFps( 30 );
	mModeDepth.setPixelFormat( PIXEL_FORMAT_DEPTH_100_UM );

	//Set Properties of Color Stream
	VideoMode mModeColor;
    mModeColor.setResolution( 640, 480 );
    mModeColor.setFps( 30 );
    mModeColor.setPixelFormat( PIXEL_FORMAT_RGB888 );
	//----------------------------------------------------------
	//----------------------[Initial Streams]---------------------
	VideoStream streamInitDepth;
    streamInitDepth.create( devAnyDevice, SENSOR_DEPTH );

	VideoStream streamInitColor;
    streamInitColor.create( devAnyDevice, SENSOR_COLOR );

	streamInitDepth.setVideoMode( mModeDepth );
	streamInitColor.setVideoMode( mModeColor );

	namedWindow( "Depth Image (Init)",  CV_WINDOW_AUTOSIZE );
    namedWindow( "Color Image (Init)",  CV_WINDOW_AUTOSIZE );
	//namedWindow( "Thresholded Image (Init)", CV_WINDOW_AUTOSIZE );

	VideoFrameRef  frameDepthInit;
    VideoFrameRef  frameColorInit;

	streamInitDepth.start();
	streamInitColor.start();
	cv::Mat BackgroundFrame;

	int avgDist = 0;
	int iMaxDepthInit = streamInitDepth.getMaxPixelValue();
Developer ID: AlDCheng, Project: Osus, Lines of code: 67, Source file: KinectDepthSVM.cpp

Example 6: createVideoMode

 void createVideoMode(VideoMode& m, int x, int y, int fps, PixelFormat format)
 {
   m.setResolution(x, y);
   m.setFps(fps);
   m.setPixelFormat(format);
 }
Developer ID: amiltonwong, Project: openni2_camera, Lines of code: 6, Source file: camera.cpp
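For context, a hypothetical call site for this helper might look as follows (the depthStream variable is illustrative, not part of the project):

VideoMode depthMode;
createVideoMode(depthMode, 640, 480, 30, PIXEL_FORMAT_DEPTH_1_MM); // fill in every setting at once
depthStream.setVideoMode(depthMode);                               // apply it to an existing VideoStream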

Example 7: initializeOpenNIDevice

int initializeOpenNIDevice(int deviceID ,const  char * deviceName  , Device &device , VideoStream &color , VideoStream &depth ,unsigned int width ,unsigned int height , unsigned int fps)
{
   unsigned int openMode=OPENNI2_OPEN_REGULAR_ENUM; /* 0 = regular deviceID and enumeration*/
   if (deviceName!=0)
   {
      //If our deviceName contains a .oni we assume that we have an oni file to open
      if (strstr(deviceName,".oni")!=0)
         {
           fprintf(stderr,"Found an .ONI filename , trying to open it..\n");
           openMode=OPENNI2_OPEN_USING_STRING;
         } else
      if (strlen(deviceName)>7)
        {
           fprintf(stderr,"deviceName is too long (%lu chars) , assuming it is a Device URI ..\n",strlen(deviceName));
           openMode=OPENNI2_OPEN_USING_STRING;
        }

   }

   switch (openMode)
   {
     //-------------------------------------------------------------------------------------
     //If we have an ONI file to open just pass it as an argument to device.open(deviceName)
     case OPENNI2_OPEN_USING_STRING :
      if (device.open(deviceName) != STATUS_OK)
      {
        fprintf(stderr,"Could not open using given string ( %s ) : %s \n",deviceName,OpenNI::getExtendedError());
        return 0;
      }
     break;
     //-------------------------------------------------------------------------------------
     //If we don't have a deviceName we assume deviceID points to the device we want to open so we will try to use
     //the openNI enumerator to get the specific device URI for device with number deviceID and use this to device.open( devURI )
     case OPENNI2_OPEN_REGULAR_ENUM :
     default :
      //We have to supply our own buffer to hold the uri device string , so we make one here
      char devURIBuffer[512]={0};
      if (device.open(getURIForDeviceNumber(deviceID,devURIBuffer,512)) != STATUS_OK)
      {
        fprintf(stderr,"Could not open an OpenNI device : %s \n",OpenNI::getExtendedError());
        return 0;
      }
     break;
   }

if (device.getSensorInfo(SENSOR_DEPTH)  != NULL)
    {
        Status rc = depth.create(device, SENSOR_DEPTH);
        if (rc == STATUS_OK)
        {
            VideoMode depthMode = depth.getVideoMode();
            depthMode.setResolution(width,height);
            depthMode.setFps(fps);
            rc = depth.setVideoMode(depthMode);
            if (rc != STATUS_OK) { fprintf(stderr,"Error setting depth video mode to %u x %u @ %u fps\n%s\n",width,height,fps,OpenNI::getExtendedError()); }

            if (depth.start() != STATUS_OK)
            {
                fprintf(stderr,"Couldn't start the depth stream: %s \n",OpenNI::getExtendedError());
                return 0;
            }
        }
        else
        {
            fprintf(stderr,"Couldn't create depth stream: %s \n",OpenNI::getExtendedError());
            return 0;
        }
    }

    if (device.getSensorInfo(SENSOR_COLOR) != NULL)
    {
        Status rc = color.create(device, SENSOR_COLOR);
        if (rc == STATUS_OK)
        {
            VideoMode colorMode = color.getVideoMode();
            colorMode.setResolution(width,height);
            colorMode.setFps(fps);
            rc = color.setVideoMode(colorMode);
            if (rc != STATUS_OK) { fprintf(stderr,"Error setting color video mode to %u x %u @ %u fps\n%s\n",width,height,fps,OpenNI::getExtendedError()); }

            if (color.start() != STATUS_OK)
            {
                fprintf(stderr,"Couldn't start the color stream: %s \n",OpenNI::getExtendedError());
                return 0;
            }
        }
        else
        {
            fprintf(stderr,"Couldn't create color stream: %s \n",OpenNI::getExtendedError());
            return 0;
        }
    }


  #if MOD_IR
    if(device.getSensorInfo(SENSOR_IR) != NULL)
    {
        Status rc = ir.create(device, SENSOR_IR);    // Create the VideoStream for IR
        if (rc == STATUS_OK)
//......... part of the code omitted .........
Developer ID: AmmarkoV, Project: RGBDAcquisition, Lines of code: 101, Source file: OpenNI2Acquisition.cpp

Example 8: initializeOpenni

void OpenniWrapperNode::initializeOpenni()
{
    Status rc = OpenNI::initialize();
    if (rc != STATUS_OK)
    {
        printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
        return;
    } else {
        cout<<"Openni initialized"<<endl;
    }



    OpenNI::addDeviceConnectedListener(&m_devicePrinter);
    OpenNI::addDeviceDisconnectedListener(&m_devicePrinter);
    OpenNI::addDeviceStateChangedListener(&m_devicePrinter);

    openni::Array<openni::DeviceInfo> deviceList;
    openni::OpenNI::enumerateDevices(&deviceList);
    for (int i = 0; i < deviceList.getSize(); ++i)
    {
        printf("Device \"%s\" already connected\n", deviceList[i].getUri());
    }

    m_vDevice.resize(deviceList.getSize());
    m_vColor.resize(deviceList.getSize());
    m_vDepth.resize(deviceList.getSize());

    cout<<"Number of devices connected "<<deviceList.getSize()<<endl;

    for (size_t i=0; i<m_DevicesDefined;i++)
    {
        if (i+1>deviceList.getSize())
        {
            cout<<"Name "<<m_vCameraNamespace[i]<<" defined but the device is not connected."<<endl;
            break;
        }

        m_vDevice[i] = new Device();
        rc = m_vDevice[i]->open(deviceList[i].getUri());
        if (rc != STATUS_OK)
        {
            printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
            return;
        } else {
            cout<<"Opened device "<<deviceList[i].getUri()<<"   namespace "<<m_vCameraNamespace[i]<<endl;
        }

        cout<<"This device supports the following sensors ";
        bool sensor = m_vDevice[i]->hasSensor(SENSOR_IR); if (sensor) cout <<"IR ";
        sensor = m_vDevice[i]->hasSensor(SENSOR_COLOR); if (sensor) cout <<"COLOR ";
        sensor = m_vDevice[i]->hasSensor(SENSOR_DEPTH); if (sensor) cout <<"DEPTH ";
        cout<<endl;

        ImageRegistrationMode mode = IMAGE_REGISTRATION_DEPTH_TO_COLOR;
        bool registrationSupported = m_vDevice[i]->isImageRegistrationModeSupported(mode);
        if (registrationSupported)
        {
            cout<<"Image registration SUPPORTED"<<endl;
            rc = m_vDevice[i]->setImageRegistrationMode(mode);
            if (rc != STATUS_OK)
            {
                std::cout<<"Could not enable image registration. Error code "<<rc<<std::endl;
            }
        } else {
            cout<<"Image registration NOT SUPPORTED"<<endl;
        }

        rc = m_vDevice[i]->setDepthColorSyncEnabled(true);
        if (rc != STATUS_OK)
        {
            std::cout<<"Could not enable depth-color sync. Some error occurred"<<std::endl;
        }

        if (m_vDevice[i]->getSensorInfo(SENSOR_DEPTH) != NULL)
        {
            m_vDepth[i] = new VideoStream;
            rc = m_vDepth[i]->create(*m_vDevice[i], SENSOR_DEPTH);
            if (rc != STATUS_OK)
            {
                printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
            }
        }
        // Note: this assumes the depth stream above was created successfully.
        VideoMode depthVideoMode = m_vDepth[i]->getVideoMode();
        depthVideoMode.setResolution(640,480);
        rc = m_vDepth[i]->setVideoMode(depthVideoMode);
        if (rc != STATUS_OK)
        {
            printf("Couldn't set increased resolution for depth stream\n%s\n", OpenNI::getExtendedError());
        }

        rc = m_vDepth[i]->start();
        if (rc != STATUS_OK)
        {
            printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
        }


//......... part of the code omitted .........
Developer ID: RaresAmbrus, Project: scitos_robot, Lines of code: 101, Source file: OpenniWrapperNode.cpp
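Examples 7 and 8 both use a get-modify-set pattern: read the stream's current VideoMode, change only the resolution, and write the mode back, so the existing pixel format and frame rate are preserved. Distilled into a minimal sketch (assuming an already-created VideoStream named depth):

VideoMode mode = depth.getVideoMode();  // start from the current settings
mode.setResolution(640, 480);           // change only the resolution
if (depth.setVideoMode(mode) != STATUS_OK)
    fprintf(stderr, "setVideoMode failed: %s\n", OpenNI::getExtendedError());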

Example 9: main

int main (int argc, char* argv[])
{
    using namespace openni;

    bool outputIsLibrary = true;
    if (argc > 1) {
        if (!strcmp(argv[1], "test_data")) {
            std::cout << "Capturing data for testing.." << std::endl;
            outputIsLibrary = false;
        } else std::cout << "Capturing data for library.." << std::endl;
    } else std::cout << "Capturing data for library.." << std::endl;

    CreateDirectory ("Output", NULL);
    int frameGap = 0;

    std::string outputAddress;

    if (outputIsLibrary) {
        CreateDirectory ("Output/image_library", NULL);
        if (argc > 2 && argv[2][0] != '\0') {
            char folder[256];
            sprintf(folder, "Output/image_library/%s", argv[2]);
            CreateDirectory (folder, NULL);
            sprintf(folder, "Output/image_library/%s/rgbd", argv[2]);
            outputAddress = folder;
            CreateDirectory (outputAddress.c_str(), NULL);
            sprintf(folder, "Output/image_library/%s/rgbd/rgb", argv[2]);
            CreateDirectory (folder, NULL);
            sprintf(folder, "Output/image_library/%s/rgbd/depth", argv[2]);
            CreateDirectory (folder, NULL);
        } else {
            CreateDirectory ("Output/image_library/item", NULL);
            outputAddress = "Output/image_library/item/rgbd";
            CreateDirectory (outputAddress.c_str(), NULL);
            CreateDirectory ("Output/image_library/item/rgbd/rgb", NULL);
            CreateDirectory ("Output/image_library/item/rgbd/depth", NULL);
        }
        frameGap = 75;
    } else {
        outputAddress = "Output/test_data/";
        CreateDirectory (outputAddress.c_str(), NULL);
        CreateDirectory ("Output/test_data/rgb", NULL);
        CreateDirectory ("Output/test_data/depth", NULL);
        frameGap = 200;
    }

    OpenNI::initialize();
    puts( "Kinect initialization..." );
    Device device;
    if ( device.open( openni::ANY_DEVICE ) != 0 )
    {
        puts( "Kinect not found !" );
        return -1;
    }
    puts( "Kinect opened" );
    VideoStream depthV, colorV;
    colorV.create( device, SENSOR_COLOR );
    colorV.start();
    puts( "Camera ok" );
    depthV.create( device, SENSOR_DEPTH );
    depthV.start();
    puts( "Depth sensor ok" );
    // Note: both streams were already started above; some drivers expect the
    // video mode to be applied before start(), so this ordering may be fragile.
    VideoMode paramvideo;
    paramvideo.setResolution( 640, 480 );
    paramvideo.setFps( 30 );
    paramvideo.setPixelFormat( PIXEL_FORMAT_DEPTH_100_UM );
    depthV.setVideoMode( paramvideo );
    paramvideo.setPixelFormat( PIXEL_FORMAT_RGB888 );
    colorV.setVideoMode( paramvideo );

    // If the depth/color synchronisation is not necessary, start is faster :
    //device.setDepthColorSyncEnabled( false );

    // Otherwise, the streams can be synchronized with a reception in the order of our choice :
    device.setDepthColorSyncEnabled( true );
    device.setImageRegistrationMode( openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR );

    VideoStream** stream = new VideoStream*[2];
    stream[0] = &depthV;
    stream[1] = &colorV;
    puts( "Kinect initialization completed" );

    cv::Mat colorBuffer( cv::Size( 640, 480 ), CV_8UC3 );
    cv::Mat depthBuffer( cv::Size( 640, 480 ), CV_16UC1 ), depthTemp, depthDisplay;
    cv::namedWindow( "RGB", CV_WINDOW_AUTOSIZE );
    cv::namedWindow( "Depth", CV_WINDOW_AUTOSIZE );
    bool firstDepth = true, firstColor = true;

    cv::Mat captureMat = cv::Mat::zeros(200, 200, CV_8UC3);

    int rgbFrameCount = 0, outputFrameCount = 0, lastRgbFrameCount = -1;
    for (;;)
    {
        cv::Mat color, depth, mask, depthShow;

        if ( device.getSensorInfo( SENSOR_DEPTH ) != NULL ) {
            VideoFrameRef depthFrame, colorFrame;
            cv::Mat colorcv( cv::Size( 640, 480 ), CV_8UC3);
//......... part of the code omitted .........
Developer ID: amazon-picking-challenge, Project: team_nanyang, Lines of code: 101, Source file: capture_id.cpp


Note: The VideoMode::setResolution method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright; for distribution and use, please refer to each project's license. Do not republish without permission.