本文整理汇总了C++中VideoStream::setVideoMode方法的典型用法代码示例。如果您正苦于以下问题:C++ VideoStream::setVideoMode方法的具体用法?C++ VideoStream::setVideoMode怎么用?C++ VideoStream::setVideoMode使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VideoStream
的用法示例。
在下文中一共展示了VideoStream::setVideoMode方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: tryConfigureVideoMode
// Attempts to switch the wrapped stream to the requested video mode.
// Returns true on success; on failure the previously active mode is
// restored (best effort) and false is returned.
virtual bool tryConfigureVideoMode(VideoMode& mode)
{
    // Remember the active mode so we can roll back if the new one is rejected.
    VideoMode previous = stream_.getVideoMode();
    if (stream_.setVideoMode(mode) == STATUS_OK)
    {
        return true;
    }
    // The requested mode was rejected — try to restore the old one and
    // report (but do not fail differently) if even that does not work.
    ROS_ERROR_STREAM_COND(stream_.setVideoMode(previous) != STATUS_OK, "Failed to recover old video mode!");
    return false;
}
示例2: kinect_init
int kinect_init()
{
Status rc = OpenNI::initialize();
if (rc != STATUS_OK)
{
printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
return 1;
}
rc = device.open(ANY_DEVICE);
if (rc != STATUS_OK)
{
printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
return 2;
}
if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
{
rc = depth.create(device, SENSOR_DEPTH);
if (rc != STATUS_OK)
{
printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
return 3;
}
const SensorInfo* sinfo = device.getSensorInfo(SENSOR_DEPTH);
const Array<VideoMode>& modes = sinfo->getSupportedVideoModes();
for (int i=0; i<modes.getSize(); i++) {
printf("%i: %ix%i, %i fps, %i format\n",
i,
modes[i].getResolutionX(),
modes[i].getResolutionY(),
modes[i].getFps(),
modes[i].getPixelFormat()
);
}
//rc = depth.setVideoMode(modes[0]); // 320x240, 30fps, format: 100
//rc = depth.setVideoMode(modes[4]); // 640x480, 30fps, format: 100
rc = depth.setVideoMode(modes[4]); // 640x480, 30fps, format: 100
if (rc != openni::STATUS_OK) {
printf("Failed to set depth resolution\n");
return -1;
}
}
rc = depth.start();
if (rc != STATUS_OK)
{
printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
return 4;
}
return 0;
}
示例3: CopyGeneralProperties
// Copies the video mode and the general stream properties (FOV, mirroring)
// plus the depth-only min/max pixel values from rSource to rTarget.
// NOTE: the return codes of setVideoMode/setProperty are not checked, so a
// property the target stream does not support is silently skipped.
void CopyGeneralProperties( const VideoStream& rSource, VideoStream& rTarget )
{
// The video mode is applied first, before the individual properties.
rTarget.setVideoMode( rSource.getVideoMode() );
// assign basic properties common to all stream types
rTarget.setProperty( ONI_STREAM_PROPERTY_VERTICAL_FOV, rSource.getVerticalFieldOfView() );
rTarget.setProperty( ONI_STREAM_PROPERTY_HORIZONTAL_FOV, rSource.getHorizontalFieldOfView() );
rTarget.setProperty( ONI_STREAM_PROPERTY_MIRRORING, rSource.getMirroringEnabled() );
// assign depth-only properties (typo "dpeth" fixed)
rTarget.setProperty( ONI_STREAM_PROPERTY_MIN_VALUE, rSource.getMinPixelValue() );
rTarget.setProperty( ONI_STREAM_PROPERTY_MAX_VALUE, rSource.getMaxPixelValue() );
}
示例4: SensorStreamManager
// Manages one OpenNI sensor stream (e.g. depth or color) of `device` and
// publishes its frames through a ROS image_transport camera publisher.
//   nh           - parent node handle; a child handle namespaced by `name` is built from it
//   device       - opened OpenNI device; must provide a sensor of `type` (asserted below)
//   type         - OpenNI sensor type to stream
//   name         - stream name, used as the ROS sub-namespace
//   frame_id     - frame id associated with the published images
//   default_mode - video mode applied to the stream right after creation
SensorStreamManager(ros::NodeHandle& nh, Device& device, SensorType type, std::string name, std::string frame_id, VideoMode& default_mode) :
device_(device),
default_mode_(default_mode),
name_(name),
frame_id_(frame_id),
running_(false),
nh_(nh, name_),
it_(nh_),
camera_info_manager_(nh_)
{
assert(device_.hasSensor(type));
// The same handler is registered for both subscribe and unsubscribe events
// of the camera publisher (see onSubscriptionChanged).
callback_ = boost::bind(&SensorStreamManager::onSubscriptionChanged, this, _1);
publisher_ = it_.advertiseCamera("image_raw", 1, callback_, callback_);
// Stream creation / mode-setting failures are only logged, not propagated.
ROS_ERROR_STREAM_COND(stream_.create(device_, type) != STATUS_OK, "Failed to create stream '" << toString(type) << "'!");
stream_.addNewFrameListener(this);
ROS_ERROR_STREAM_COND(stream_.setVideoMode(default_mode_) != STATUS_OK, "Failed to set default video mode for stream '" << toString(type) << "'!");
}
示例5: main
//......... (part of this function's code was omitted here) .........
// Advertise the ROS topics for the depth stream.
//DEPTH
pub_depth = n.advertise<sensor_msgs::Image>("/"+topic+"/depth/image_raw", 1);
pub_camera_info_depth = n.advertise<sensor_msgs::CameraInfo>("/"+topic+"/depth/camera_info", 1);
}
}
// Create the color stream and its ROS topics when an rgb mode was requested.
if(_rgb_mode>=0){
if (device.getSensorInfo(SENSOR_COLOR) != NULL){
rc = rgb.create(device, SENSOR_COLOR);
if (rc != STATUS_OK){
printf("Couldn't create rgb stream\n%s\n", OpenNI::getExtendedError());
fflush(stdout);
return 3;
}
//RGB
pub_rgb = n.advertise<sensor_msgs::Image>("/"+topic+"/rgb/image_raw", 1);
pub_camera_info_rgb = n.advertise<sensor_msgs::CameraInfo>("/"+topic+"/rgb/camera_info", 1);
}
}
// No mode selected on the command line: list every supported depth and rgb
// video mode, clean up and exit so the user can pick an index.
if(_depth_mode<0 && _rgb_mode<0){
cout << "Depth modes" << endl;
const openni::SensorInfo* sinfo = device.getSensorInfo(openni::SENSOR_DEPTH); // select index=4 640x480, 30 fps, 1mm
const openni::Array< openni::VideoMode>& modesDepth = sinfo->getSupportedVideoModes();
printf("Enums data:\nPIXEL_FORMAT_DEPTH_1_MM = 100,\nPIXEL_FORMAT_DEPTH_100_UM = 101,\nPIXEL_FORMAT_SHIFT_9_2 = 102,\nPIXEL_FORMAT_SHIFT_9_3 = 103,\nPIXEL_FORMAT_RGB888 = 200,\nPIXEL_FORMAT_YUV422 = 201,\nPIXEL_FORMAT_GRAY8 = 202,\nPIXEL_FORMAT_GRAY16 = 203,\nPIXEL_FORMAT_JPEG = 204,\nPIXEL_FORMAT_YUYV = 205,\n\n");
cout << "Depth modes" << endl;
for (int i = 0; i<modesDepth.getSize(); i++) {
printf("%i: %ix%i, %i fps, %i format\n", i, modesDepth[i].getResolutionX(), modesDepth[i].getResolutionY(),modesDepth[i].getFps(), modesDepth[i].getPixelFormat()); //PIXEL_FORMAT_DEPTH_1_MM = 100, PIXEL_FORMAT_DEPTH_100_UM = 101
}
cout << "Rgb modes" << endl;
const openni::SensorInfo* sinfoRgb = device.getSensorInfo(openni::SENSOR_COLOR); // select index=4 640x480, 30 fps, 1mm
const openni::Array< openni::VideoMode>& modesRgb = sinfoRgb->getSupportedVideoModes();
for (int i = 0; i<modesRgb.getSize(); i++) {
printf("%i: %ix%i, %i fps, %i format\n", i, modesRgb[i].getResolutionX(), modesRgb[i].getResolutionY(),modesRgb[i].getFps(), modesRgb[i].getPixelFormat()); //PIXEL_FORMAT_DEPTH_1_MM = 100, PIXEL_FORMAT_DEPTH_100_UM
}
depth.stop();
depth.destroy();
rgb.stop();
rgb.destroy();
device.close();
OpenNI::shutdown();
exit(1);
}
// Apply the user-selected video modes and start the streams.
// NOTE(review): _depth_mode/_rgb_mode are used to index the supported-mode
// arrays without a bounds check — an out-of-range index is undefined behavior.
if(_depth_mode>=0){
rc = depth.setVideoMode(device.getSensorInfo(SENSOR_DEPTH)->getSupportedVideoModes()[_depth_mode]);
depth.setMirroringEnabled(false);
rc = depth.start();
}
if(_rgb_mode>=0){
rc = rgb.setVideoMode(device.getSensorInfo(SENSOR_COLOR)->getSupportedVideoModes()[_rgb_mode]);
rgb.setMirroringEnabled(false);
rgb.getCameraSettings()->setAutoExposureEnabled(true);
rgb.getCameraSettings()->setAutoWhiteBalanceEnabled(true);
cerr << "Camera settings valid: " << rgb.getCameraSettings()->isValid() << endl;
rc = rgb.start();
}
// Optional hardware depth/color frame synchronization.
// NOTE(review): typo "de pth" in the runtime error string below (left as-is).
if(_depth_mode>=0 && _rgb_mode>=0 && _sync==1){
rc =device.setDepthColorSyncEnabled(true);
if (rc != STATUS_OK) {
printf("Couldn't enable de pth and rgb images synchronization\n%s\n",
OpenNI::getExtendedError());
exit(2);
}
}
// Optional registration of the depth image into the color camera frame.
if(_depth_mode>=0 && _rgb_mode>=0 && _registration==1){
device.setImageRegistrationMode(openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR);
}
// Run the capture loop in a background thread while ROS spins; `run` is the
// shared flag the thread polls to know when to stop.
run = true;
pthread_t runner;
pthread_create(&runner, 0, camera_thread, 0);
ros::spin();
// ros::spin() returned (shutdown requested): stop the thread and clean up.
void* result;
run =false;
pthread_join(runner, &result);
depth.stop();
depth.destroy();
rgb.stop();
rgb.destroy();
device.close();
OpenNI::shutdown();
return 0;
}
示例6: _tmain
// Opens the default depth device, captures one background frame, then runs an
// endless loop that detects blobs against the background and prints their
// center points plus periodic timing statistics.
int _tmain(int argc, _TCHAR* argv[])
{
DepthDetector detector(ThresholdMin, ThresholdMax);
ScanLineSegmenter segmenter;
OpenNI::initialize();
Device device;
if (device.open(ANY_DEVICE) != STATUS_OK)
{
std::cout << "could not open any device\r\n";
return 1;
}
// Informational dump of every supported depth mode.
if (device.hasSensor(SENSOR_DEPTH))
{
auto info = device.getSensorInfo(SENSOR_DEPTH);
auto& modes = info->getSupportedVideoModes();
std::cout << "depth sensor supported modes:\r\n";
for (int i = 0; i < modes.getSize(); ++i)
{
auto& mode = modes[i];
std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
}
}
// Configure a mirrored 320x240 @ 25 fps, 1 mm depth stream.
// NOTE(review): return codes of create/setVideoMode/start are not checked.
VideoStream stream;
stream.create(device, SENSOR_DEPTH);
VideoMode mode;
mode.setFps(25);
mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
mode.setResolution(320, 240);
stream.setMirroringEnabled(true);
stream.setVideoMode(mode);
stream.start();
// Grab one frame as the background reference for the detector.
std::cout << "press any key to capture background\r\n";
std::cin.get();
VideoFrameRef frame;
stream.readFrame(&frame);
DepthImage image(320, 240);
copyFrameToImage(frame, image);
detector.background(image);
std::cout << "starting capture loop\r\n";
CenterPointExtractor centerPointExtractor(MinBlobSize);
std::chrono::high_resolution_clock timer;
auto startTime = timer.now();
int frameId = 0;
// Main capture loop: read frame -> detect -> segment -> extract centers.
while (true)
{
stream.readFrame(&frame);
copyFrameToImage(frame, image);
detector.detect(image);
std::vector<LineSegment> segments;
segmenter.segment(detector.mask(), segments);
std::vector<std::pair<float, float>> centerPoints;
centerPointExtractor.extract(segments, centerPoints);
if (centerPoints.size())
{
std::cout << "point count: " << centerPoints.size();
std::cout << "\t points: ";
for (auto& point : centerPoints)
{
std::cout << "(" << point.first << ", " << point.second << ") ";
}
std::cout << "\r\n";
}
++frameId;
// Print timing stats every 64 frames.
// NOTE(review): the value labeled "fps" is actually the average
// milliseconds per frame (elapsed ms / 64), not frames per second.
if (frameId % 64 == 0)
{
auto stopTime = timer.now();
auto elapsedTime = stopTime - startTime;
auto elapsedMilliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(elapsedTime).count();
std::cout << "\t total frames: " << frameId << "\t fps: " << elapsedMilliseconds / 64 << std::endl;
startTime = stopTime;
}
}
// NOTE(review): unreachable — the loop above never exits.
openni::OpenNI::shutdown();
//......... (part of this function's code was omitted here) .........
示例7: _tmain
int _tmain(int argc, _TCHAR* argv[])
{
sdl::Application app;
DepthDetector detector(ThresholdMin, ThresholdMax);
ScanLineSegmenter segmenter;
OpenNI::initialize();
Device device;
if (device.open(ANY_DEVICE) != STATUS_OK)
{
std::cout << "could not open any device\r\n";
return 1;
}
if (device.hasSensor(SENSOR_DEPTH))
{
auto info = device.getSensorInfo(SENSOR_DEPTH);
auto& modes = info->getSupportedVideoModes();
std::cout << "depth sensor supported modes:\r\n";
for (int i = 0; i < modes.getSize(); ++i)
{
auto& mode = modes[i];
std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
}
}
VideoStream stream;
stream.create(device, SENSOR_DEPTH);
VideoMode mode;
mode.setFps(25);
mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
mode.setResolution(320, 240);
stream.setMirroringEnabled(true);
stream.setVideoMode(mode);
stream.start();
std::cout << "press any key to capture background\r\n";
std::cin.get();
VideoFrameRef frame;
stream.readFrame(&frame);
DepthImage image(320, 240);
copyFrameToImage(frame, image);
detector.background(image);
std::cout << "starting capture loop\r\n";
sdl::GLContext::setVersion(4, 3);
ImageViewer viewer;
viewer.add(0, 0, 320, 240);
viewer.add(320, 0, 320, 240);
viewer.add(0, 240, 320, 240);
viewer.add(320, 240, 320, 240);
CenterPointExtractor centerPointExtractor(MinBlobSize);
MotionRecorder recorder;
while (true)
{
stream.readFrame(&frame);
copyFrameToImage(frame, image);
detector.detect(image);
std::vector<LineSegment> segments;
segmenter.segment(detector.mask(), segments);
std::vector<std::pair<float, float>> centerPoints;
centerPointExtractor.extract(segments, centerPoints);
recorder.track(centerPoints);
viewer.crosses.clear();
std::transform(begin(centerPoints), end(centerPoints), std::back_inserter(viewer.crosses), [](std::pair<float, float>& coord) {
return Cross{ coord.first, coord.second };
});
viewer.lines.clear();
std::transform(begin(recorder.motions()), end(recorder.motions()), std::back_inserter(viewer.lines), [](const Motion& motion) {
return Lines{ motion.points };
});
viewer[0].update(detector.mask());
viewer[1].update(image);
viewer[2].update(detector.background());
viewer[3].update(detector.difference());
viewer.update();
}
openni::OpenNI::shutdown();
return 0;
}
示例8: main
//......... (part of this function's code was omitted here) .........
// Mat sampleMat = (Mat_<float>(1,2) << 138.5, 57);
// float response = SVMFinger.predict(sampleMat);
waitKey();
destroyWindow("Image");
destroyWindow("Image2");
//------------------------------------------
// Open the first available OpenNI device.
// NOTE(review): the return codes of open/create/setVideoMode/start in this
// section are never checked.
OpenNI::initialize();
Device devAnyDevice;
devAnyDevice.open(ANY_DEVICE);
//----------------[Define Video Settings]-------------------
//Set Properties of Depth Stream: 640x480 @ 30 fps, 100 um depth units
VideoMode mModeDepth;
mModeDepth.setResolution( 640, 480 );
mModeDepth.setFps( 30 );
mModeDepth.setPixelFormat( PIXEL_FORMAT_DEPTH_100_UM );
//Set Properties of Color Stream: 640x480 @ 30 fps, RGB888
VideoMode mModeColor;
mModeColor.setResolution( 640, 480 );
mModeColor.setFps( 30 );
mModeColor.setPixelFormat( PIXEL_FORMAT_RGB888 );
//----------------------------------------------------------
//----------------------[Initial Streams]---------------------
VideoStream streamInitDepth;
streamInitDepth.create( devAnyDevice, SENSOR_DEPTH );
VideoStream streamInitColor;
streamInitColor.create( devAnyDevice, SENSOR_COLOR );
streamInitDepth.setVideoMode( mModeDepth );
streamInitColor.setVideoMode( mModeColor );
namedWindow( "Depth Image (Init)", CV_WINDOW_AUTOSIZE );
namedWindow( "Color Image (Init)", CV_WINDOW_AUTOSIZE );
//namedWindow( "Thresholded Image (Init)", CV_WINDOW_AUTOSIZE );
VideoFrameRef frameDepthInit;
VideoFrameRef frameColorInit;
streamInitDepth.start();
streamInitColor.start();
cv::Mat BackgroundFrame;
int avgDist = 0;
int iMaxDepthInit = streamInitDepth.getMaxPixelValue();
// Reset the (file-scope) output coordinate buffers.
OutX.clear();
OutY.clear();
// NOTE(review): clearing freshly-constructed vectors is redundant.
vector<int> OldOutX, OldOutY;
OldOutX.clear();
OldOutY.clear();
//------------------------------------------------------------
//--------------------[Initiation Process]--------------------
// Read depth+color frame pairs; wrap the raw 16-bit depth buffer in a Mat
// header without copying (the cast to void* drops constness of getData()).
while( true )
{
streamInitDepth.readFrame( &frameDepthInit );
streamInitColor.readFrame( &frameColorInit );
const cv::Mat mImageDepth( frameDepthInit.getHeight(), frameDepthInit.getWidth(), CV_16UC1, (void*)frameDepthInit.getData());
cv::Mat mScaledDepth;
示例9: initializeOpenNIDevice
// Opens an OpenNI device either by URI/.oni filename (deviceName) or by
// enumeration index (deviceID), then creates and starts its depth and color
// streams at the requested width x height @ fps.
// Returns 1-style truthiness: 0 on any failure (the success path lies in the
// omitted tail of this function).
// NOTE(review): several error strings below are swapped between the depth and
// color sections — see the inline notes.
int initializeOpenNIDevice(int deviceID ,const char * deviceName , Device &device , VideoStream &color , VideoStream &depth ,unsigned int width ,unsigned int height , unsigned int fps)
{
unsigned int openMode=OPENNI2_OPEN_REGULAR_ENUM; /* 0 = regular deviceID and enumeration*/
if (deviceName!=0)
{
//If our deviceName contains a .oni we assume that we have an oni file to open
if (strstr(deviceName,".oni")!=0)
{
fprintf(stderr,"Found an .ONI filename , trying to open it..\n");
openMode=OPENNI2_OPEN_USING_STRING;
} else
if (strlen(deviceName)>7)
{
fprintf(stderr,"deviceName is too long (%lu chars) , assuming it is a Device URI ..\n",strlen(deviceName));
openMode=OPENNI2_OPEN_USING_STRING;
}
}
switch (openMode)
{
//-------------------------------------------------------------------------------------
//If we have an ONI file to open just pass it as an argument to device.open(deviceName)
case OPENNI2_OPEN_USING_STRING :
if (device.open(deviceName) != STATUS_OK)
{
fprintf(stderr,"Could not open using given string ( %s ) : %s \n",deviceName,OpenNI::getExtendedError());
return 0;
}
break;
//-------------------------------------------------------------------------------------
//If we don't have a deviceName we assume deviceID points to the device we want to open so we will try to use
//the openNI enumerator to get the specific device URI for device with number deviceID and use this to device.open( devURI )
case OPENNI2_OPEN_REGULAR_ENUM :
default :
//We have to supply our own buffer to hold the uri device string , so we make one here
char devURIBuffer[512]={0};
if (device.open(getURIForDeviceNumber(deviceID,devURIBuffer,512)) != STATUS_OK)
{
fprintf(stderr,"Could not open an OpenNI device : %s \n",OpenNI::getExtendedError());
return 0;
}
break;
}
// ---- Depth stream: create, resize the current mode to width x height @ fps, start.
if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
{
Status rc = depth.create(device, SENSOR_DEPTH);
if (rc == STATUS_OK)
{
VideoMode depthMode = depth.getVideoMode();
depthMode.setResolution(width,height);
depthMode.setFps(fps);
// NOTE(review): inner 'rc' shadows the outer one declared above.
Status rc = depth.setVideoMode(depthMode);
// NOTE(review): message says "color" but this is the depth stream.
if (rc != STATUS_OK) { fprintf(stderr,"Error getting color at video mode requested %u x %u @ %u fps\n%s\n",width,height,fps,OpenNI::getExtendedError()); }
if(depth.start()!= STATUS_OK)
{
// NOTE(review): message says "color stream" but this starts the depth stream.
fprintf(stderr,"Couldn't start the color stream: %s \n",OpenNI::getExtendedError());
return 0;
}
}
else
{
fprintf(stderr,"Couldn't create depth stream: %s \n",OpenNI::getExtendedError());
return 0;
}
}
// ---- Color stream: same pattern as the depth stream above.
if (device.getSensorInfo(SENSOR_COLOR) != NULL)
{
Status rc = color.create(device, SENSOR_COLOR);
if (rc == STATUS_OK)
{
VideoMode colorMode = color.getVideoMode();
colorMode.setResolution(width,height);
colorMode.setFps(fps);
// NOTE(review): inner 'rc' shadows the outer one declared above.
Status rc = color.setVideoMode(colorMode);
// NOTE(review): message says "depth" but this is the color stream.
if (rc != STATUS_OK) { fprintf(stderr,"Error getting depth at video mode requested %u x %u @ %u fps\n%s\n",width,height,fps,OpenNI::getExtendedError()); }
if(color.start() != STATUS_OK)
{
fprintf(stderr,"Couldn't start the color stream: %s \n",OpenNI::getExtendedError());
return 0;
}
}
else
{
// NOTE(review): message says "depth stream" but this is the color stream.
fprintf(stderr,"Couldn't create depth stream: %s \n",OpenNI::getExtendedError());
OpenNI::getExtendedError();
return 0;
}
}
// Optional IR stream support (tail of this block is omitted in the source).
#if MOD_IR
if(device.getSensorInfo(SENSOR_IR) != NULL)
{
Status rc = ir.create(device, SENSOR_IR); // Create the VideoStream for IR
if (rc == STATUS_OK)
//......... (part of this function's code was omitted here) .........