This article collects typical usage examples of the C++ method VideoMode::setFps. If you are asking: what exactly does C++ VideoMode::setFps do? How is it used? Where can I find examples of VideoMode::setFps? Then the hand-picked code examples below may help. You can also explore further usage examples of VideoMode, the class this method belongs to.
The following shows 7 code examples of VideoMode::setFps, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ examples.
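Before diving into the examples: setFps belongs to OpenNI2's openni::VideoMode, which bundles resolution, frame rate, and pixel format; the mode is then applied to a VideoStream. A minimal, self-contained sketch of that pattern (assuming an already-opened Device with a depth sensor; error handling abbreviated) looks like this:
#include <OpenNI.h>
using namespace openni;

// Minimal sketch: request 320x240 depth at 30 fps on an open device.
Status configureDepthStream(Device& device, VideoStream& stream)
{
    if (stream.create(device, SENSOR_DEPTH) != STATUS_OK)
        return STATUS_ERROR;
    VideoMode mode;
    mode.setResolution(320, 240);                 // width x height in pixels
    mode.setFps(30);                              // requested frames per second
    mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM); // depth values in millimetres
    if (stream.setVideoMode(mode) != STATUS_OK)   // rejected if the sensor does not support the mode
        return STATUS_ERROR;
    return stream.start();
}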
Example 1: initializeVideo
void TouchTracking::initializeVideo()
{
OpenNI::initialize();
if (device.open(ANY_DEVICE) != STATUS_OK)
{
throw std::runtime_error("could not open any device!");
}
if (!device.hasSensor(SENSOR_DEPTH))
{
throw std::runtime_error("sensor cannot receive depth!");
}
auto info = device.getSensorInfo(SENSOR_DEPTH);
auto& modes = info->getSupportedVideoModes();
//std::cout << "depth sensor supported modes:\r\n";
for (int i = 0; i < modes.getSize(); ++i)
{
m_videoModes.push_back(modes[i]);
//std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
}
VideoMode mode;
mode.setFps(60);
mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
mode.setResolution(320, 240);
videoMode(mode);
stream.setMirroringEnabled(false);
}
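Example 1 hands the finished mode to a class-level videoMode() setter without checking whether the driver accepted it. Since VideoStream::setVideoMode returns a Status, a defensive variant could look like this sketch (the name applyVideoMode is my own, not part of the sample):
// Sketch only: fail loudly if the requested mode is rejected by the driver.
void applyVideoMode(openni::VideoStream& stream, const openni::VideoMode& mode)
{
    if (stream.setVideoMode(mode) != openni::STATUS_OK)
        throw std::runtime_error(openni::OpenNI::getExtendedError());
}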
Example 2: _tmain
int _tmain(int argc, _TCHAR* argv[])
{
DepthDetector detector(ThresholdMin, ThresholdMax);
ScanLineSegmenter segmenter;
OpenNI::initialize();
Device device;
if (device.open(ANY_DEVICE) != STATUS_OK)
{
std::cout << "could not open any device\r\n";
return 1;
}
if (device.hasSensor(SENSOR_DEPTH))
{
auto info = device.getSensorInfo(SENSOR_DEPTH);
auto& modes = info->getSupportedVideoModes();
std::cout << "depth sensor supported modes:\r\n";
for (int i = 0; i < modes.getSize(); ++i)
{
auto& mode = modes[i];
std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
}
}
VideoStream stream;
stream.create(device, SENSOR_DEPTH);
VideoMode mode;
mode.setFps(25);
mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
mode.setResolution(320, 240);
stream.setMirroringEnabled(true);
stream.setVideoMode(mode);
stream.start();
std::cout << "press any key to capture background\r\n";
std::cin.get();
VideoFrameRef frame;
stream.readFrame(&frame);
DepthImage image(320, 240);
copyFrameToImage(frame, image);
detector.background(image);
std::cout << "starting capture loop\r\n";
CenterPointExtractor centerPointExtractor(MinBlobSize);
std::chrono::high_resolution_clock timer;
auto startTime = timer.now();
int frameId = 0;
while (true)
{
stream.readFrame(&frame);
copyFrameToImage(frame, image);
detector.detect(image);
std::vector<LineSegment> segments;
segmenter.segment(detector.mask(), segments);
std::vector<std::pair<float, float>> centerPoints;
centerPointExtractor.extract(segments, centerPoints);
if (centerPoints.size())
{
std::cout << "point count: " << centerPoints.size();
std::cout << "\t points: ";
for (auto& point : centerPoints)
{
std::cout << "(" << point.first << ", " << point.second << ") ";
}
std::cout << "\r\n";
}
++frameId;
if (frameId % 64 == 0)
{
auto stopTime = timer.now();
auto elapsedTime = stopTime - startTime;
auto elapsedMilliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(elapsedTime).count();
std::cout << "\t total frames: " << frameId << "\t fps: " << elapsedMilliseconds / 64 << std::endl;
startTime = stopTime;
}
}
openni::OpenNI::shutdown();
//......... some code omitted here .........
Example 3: _tmain
int _tmain(int argc, _TCHAR* argv[])
{
sdl::Application app;
DepthDetector detector(ThresholdMin, ThresholdMax);
ScanLineSegmenter segmenter;
OpenNI::initialize();
Device device;
if (device.open(ANY_DEVICE) != STATUS_OK)
{
std::cout << "could not open any device\r\n";
return 1;
}
if (device.hasSensor(SENSOR_DEPTH))
{
auto info = device.getSensorInfo(SENSOR_DEPTH);
auto& modes = info->getSupportedVideoModes();
std::cout << "depth sensor supported modes:\r\n";
for (int i = 0; i < modes.getSize(); ++i)
{
auto& mode = modes[i];
std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
}
}
VideoStream stream;
stream.create(device, SENSOR_DEPTH);
VideoMode mode;
mode.setFps(25);
mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
mode.setResolution(320, 240);
stream.setMirroringEnabled(true);
stream.setVideoMode(mode);
stream.start();
std::cout << "press any key to capture background\r\n";
std::cin.get();
VideoFrameRef frame;
stream.readFrame(&frame);
DepthImage image(320, 240);
copyFrameToImage(frame, image);
detector.background(image);
std::cout << "starting capture loop\r\n";
sdl::GLContext::setVersion(4, 3);
ImageViewer viewer;
viewer.add(0, 0, 320, 240);
viewer.add(320, 0, 320, 240);
viewer.add(0, 240, 320, 240);
viewer.add(320, 240, 320, 240);
CenterPointExtractor centerPointExtractor(MinBlobSize);
MotionRecorder recorder;
while (true)
{
stream.readFrame(&frame);
copyFrameToImage(frame, image);
detector.detect(image);
std::vector<LineSegment> segments;
segmenter.segment(detector.mask(), segments);
std::vector<std::pair<float, float>> centerPoints;
centerPointExtractor.extract(segments, centerPoints);
recorder.track(centerPoints);
viewer.crosses.clear();
std::transform(begin(centerPoints), end(centerPoints), std::back_inserter(viewer.crosses), [](std::pair<float, float>& coord) {
return Cross{ coord.first, coord.second };
});
viewer.lines.clear();
std::transform(begin(recorder.motions()), end(recorder.motions()), std::back_inserter(viewer.lines), [](const Motion& motion) {
return Lines{ motion.points };
});
viewer[0].update(detector.mask());
viewer[1].update(image);
viewer[2].update(detector.background());
viewer[3].update(detector.difference());
viewer.update();
}
openni::OpenNI::shutdown();
return 0;
}
Example 4: main
//......... some code omitted here .........
trainingData[i][1] = svmC[i+1];
trainingData[i][2] = svmA[i+1];
trainingData[i][3] = svmP[i+1];
}
Mat trainingDataMat(1000, 4, CV_32FC1, trainingData);
// Set up SVM's parameters
CvSVMParams params;
params = SVMFinger.get_params();
//params.svm_type = CvSVM::C_SVC;
//params.kernel_type = CvSVM::LINEAR;
//params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);
// Train the SVM
SVMFinger.train_auto(trainingDataMat, labelsMat, Mat(), Mat(), params);
// Mat sampleMat = (Mat_<float>(1,2) << 138.5, 57);
// float response = SVMFinger.predict(sampleMat);
waitKey();
destroyWindow("Image");
destroyWindow("Image2");
//------------------------------------------
OpenNI::initialize();
Device devAnyDevice;
devAnyDevice.open(ANY_DEVICE);
//----------------[Define Video Settings]-------------------
//Set Properties of Depth Stream
VideoMode mModeDepth;
mModeDepth.setResolution( 640, 480 );
mModeDepth.setFps( 30 );
mModeDepth.setPixelFormat( PIXEL_FORMAT_DEPTH_100_UM );
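// Note: PIXEL_FORMAT_DEPTH_100_UM reports depth in 100 µm units; PIXEL_FORMAT_DEPTH_1_MM (as in the earlier examples) reports millimetres.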
//Set Properties of Color Stream
VideoMode mModeColor;
mModeColor.setResolution( 640, 480 );
mModeColor.setFps( 30 );
mModeColor.setPixelFormat( PIXEL_FORMAT_RGB888 );
//----------------------------------------------------------
//----------------------[Initial Streams]---------------------
VideoStream streamInitDepth;
streamInitDepth.create( devAnyDevice, SENSOR_DEPTH );
VideoStream streamInitColor;
streamInitColor.create( devAnyDevice, SENSOR_COLOR );
streamInitDepth.setVideoMode( mModeDepth );
streamInitColor.setVideoMode( mModeColor );
namedWindow( "Depth Image (Init)", CV_WINDOW_AUTOSIZE );
namedWindow( "Color Image (Init)", CV_WINDOW_AUTOSIZE );
//namedWindow( "Thresholded Image (Init)", CV_WINDOW_AUTOSIZE );
VideoFrameRef frameDepthInit;
VideoFrameRef frameColorInit;
streamInitDepth.start();
streamInitColor.start();
cv::Mat BackgroundFrame;
int avgDist = 0;
int iMaxDepthInit = streamInitDepth.getMaxPixelValue();
Example 5: createVideoMode
void createVideoMode(VideoMode& m, int x, int y, int fps, PixelFormat format)
{
m.setResolution(x, y);
m.setFps(fps);
m.setPixelFormat(format);
}
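A call site for this helper might look like the following (a sketch; the stream is assumed to be created as in the earlier examples):
VideoMode depthMode;
createVideoMode(depthMode, 640, 480, 30, PIXEL_FORMAT_DEPTH_1_MM);
stream.setVideoMode(depthMode);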
Example 6: initializeOpenNIDevice
int initializeOpenNIDevice(int deviceID ,const char * deviceName , Device &device , VideoStream &color , VideoStream &depth ,unsigned int width ,unsigned int height , unsigned int fps)
{
unsigned int openMode=OPENNI2_OPEN_REGULAR_ENUM; /* 0 = regular deviceID and enumeration*/
if (deviceName!=0)
{
//If our deviceName contains a .oni we assume that we have an oni file to open
if (strstr(deviceName,".oni")!=0)
{
fprintf(stderr,"Found an .ONI filename , trying to open it..\n");
openMode=OPENNI2_OPEN_USING_STRING;
} else
if (strlen(deviceName)>7)
{
fprintf(stderr,"deviceName is too long (%lu chars) , assuming it is a Device URI ..\n",strlen(deviceName));
openMode=OPENNI2_OPEN_USING_STRING;
}
}
switch (openMode)
{
//-------------------------------------------------------------------------------------
//If we have an ONI file to open just pass it as an argument to device.open(deviceName)
case OPENNI2_OPEN_USING_STRING :
if (device.open(deviceName) != STATUS_OK)
{
fprintf(stderr,"Could not open using given string ( %s ) : %s \n",deviceName,OpenNI::getExtendedError());
return 0;
}
break;
//-------------------------------------------------------------------------------------
//If we don't have a deviceName we assume deviceID points to the device we want to open so we will try to use
//the openNI enumerator to get the specific device URI for device with number deviceID and use this to device.open( devURI )
case OPENNI2_OPEN_REGULAR_ENUM :
default :
//We have to supply our own buffer to hold the uri device string , so we make one here
char devURIBuffer[512]={0};
if (device.open(getURIForDeviceNumber(deviceID,devURIBuffer,512)) != STATUS_OK)
{
fprintf(stderr,"Could not open an OpenNI device : %s \n",OpenNI::getExtendedError());
return 0;
}
break;
}
if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
{
Status rc = depth.create(device, SENSOR_DEPTH);
if (rc == STATUS_OK)
{
VideoMode depthMode = depth.getVideoMode();
depthMode.setResolution(width,height);
depthMode.setFps(fps);
Status modeRc = depth.setVideoMode(depthMode); // renamed to avoid shadowing the outer rc
if (modeRc != STATUS_OK) { fprintf(stderr,"Error setting depth video mode requested %u x %u @ %u fps\n%s\n",width,height,fps,OpenNI::getExtendedError()); }
if(depth.start()!= STATUS_OK)
{
fprintf(stderr,"Couldn't start the color stream: %s \n",OpenNI::getExtendedError());
return 0;
}
}
else
{
fprintf(stderr,"Couldn't create depth stream: %s \n",OpenNI::getExtendedError());
return 0;
}
}
if (device.getSensorInfo(SENSOR_COLOR) != NULL)
{
Status rc = color.create(device, SENSOR_COLOR);
if (rc == STATUS_OK)
{
VideoMode colorMode = color.getVideoMode();
colorMode.setResolution(width,height);
colorMode.setFps(fps);
Status modeRc = color.setVideoMode(colorMode); // renamed to avoid shadowing the outer rc
if (modeRc != STATUS_OK) { fprintf(stderr,"Error setting color video mode requested %u x %u @ %u fps\n%s\n",width,height,fps,OpenNI::getExtendedError()); }
if(color.start() != STATUS_OK)
{
fprintf(stderr,"Couldn't start the color stream: %s \n",OpenNI::getExtendedError());
return 0;
}
}
else
{
fprintf(stderr,"Couldn't create depth stream: %s \n",OpenNI::getExtendedError());
OpenNI::getExtendedError();
return 0;
}
}
#if MOD_IR
if(device.getSensorInfo(SENSOR_IR) != NULL)
{
Status rc = ir.create(device, SENSOR_IR); // Create the VideoStream for IR
if (rc == STATUS_OK)
//......... some code omitted here .........
Example 7: main
int main (int argc, char* argv[])
{
using namespace openni;
bool outputIsLibrary = true;
if (argc > 1 && !strcmp(argv[1], "test_data")) {
std::cout << "Capturing data for testing.." << std::endl;
outputIsLibrary = false;
} else {
std::cout << "Capturing data for library.." << std::endl;
}
CreateDirectory ("Output", NULL);
int frameGap = 0;
std::string outputAddress;
if (outputIsLibrary) {
CreateDirectory ("Output/image_library", NULL);
if (argc > 2) { // argv[2] names the output subfolder; the original compared the pointer argv[2] against '\0'
char folder[256];
sprintf(folder, "Output/image_library/%s", argv[2]);
CreateDirectory (folder, NULL);
sprintf(folder, "Output/image_library/%s/rgbd", argv[2]);
outputAddress = folder;
CreateDirectory (outputAddress.c_str(), NULL);
sprintf(folder, "Output/image_library/%s/rgbd/rgb", argv[2]);
CreateDirectory (folder, NULL);
sprintf(folder, "Output/image_library/%s/rgbd/depth", argv[2]);
CreateDirectory (folder, NULL);
} else {
CreateDirectory ("Output/image_library/item", NULL);
outputAddress = "Output/image_library/item/rgbd";
CreateDirectory (outputAddress.c_str(), NULL);
CreateDirectory ("Output/image_library/item/rgbd/rgb", NULL);
CreateDirectory ("Output/image_library/item/rgbd/depth", NULL);
}
frameGap = 75;
} else {
outputAddress = "Output/test_data/";
CreateDirectory (outputAddress.c_str(), NULL);
CreateDirectory ("Output/test_data/rgb", NULL);
CreateDirectory ("Output/test_data/depth", NULL);
frameGap = 200;
}
OpenNI::initialize();
puts( "Kinect initialization..." );
Device device;
if ( device.open( openni::ANY_DEVICE ) != STATUS_OK )
{
puts( "Kinect not found !" );
return -1;
}
puts( "Kinect opened" );
VideoStream depthV, colorV;
colorV.create( device, SENSOR_COLOR );
colorV.start();
puts( "Camera ok" );
depthV.create( device, SENSOR_DEPTH );
depthV.start();
puts( "Depth sensor ok" );
VideoMode paramvideo;
paramvideo.setResolution( 640, 480 );
paramvideo.setFps( 30 );
paramvideo.setPixelFormat( PIXEL_FORMAT_DEPTH_100_UM );
depthV.setVideoMode( paramvideo );
paramvideo.setPixelFormat( PIXEL_FORMAT_RGB888 );
colorV.setVideoMode( paramvideo );
// If the depth/color synchronisation is not necessary, start is faster :
//device.setDepthColorSyncEnabled( false );
// Otherwise, the streams can be synchronized with a reception in the order of our choice :
device.setDepthColorSyncEnabled( true );
device.setImageRegistrationMode( openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR );
VideoStream** stream = new VideoStream*[2];
stream[0] = &depthV;
stream[1] = &colorV;
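// Presumably this array feeds OpenNI::waitForAnyStream in the capture loop below (elided), so the code can block until either stream has a new frame.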
puts( "Kinect initialization completed" );
cv::Mat colorBuffer( cv::Size( 640, 480 ), CV_8UC3 );
cv::Mat depthBuffer( cv::Size( 640, 480 ), CV_16UC1 ), depthTemp, depthDisplay;
cv::namedWindow( "RGB", CV_WINDOW_AUTOSIZE );
cv::namedWindow( "Depth", CV_WINDOW_AUTOSIZE );
bool firstDepth = true, firstColor = true;
cv::Mat captureMat = cv::Mat::zeros(200, 200, CV_8UC3);
int rgbFrameCount = 0, outputFrameCount = 0, lastRgbFrameCount = -1;
for (;;)
{
cv::Mat color, depth, mask, depthShow;
if ( device.getSensorInfo( SENSOR_DEPTH ) != NULL ) {
VideoFrameRef depthFrame, colorFrame;
cv::Mat colorcv( cv::Size( 640, 480 ), CV_8UC3);
//......... some code omitted here .........