本文整理汇总了C++中VideoStream::start方法的典型用法代码示例。如果您正苦于以下问题:C++ VideoStream::start方法的具体用法?C++ VideoStream::start怎么用?C++ VideoStream::start使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VideoStream
的用法示例。
在下文中一共展示了VideoStream::start方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
int main()
{
	// OpenNI event sample: starts the depth stream of the first available
	// device and prints per-frame data via PrintCallback until a key is hit.
	// Returns 0 on success, a positive error code otherwise.
	Status rc = OpenNI::initialize();
	if (rc != STATUS_OK)
	{
		printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
		return 1;
	}
	// Print OpenNI device/driver events while the sample runs.
	OpenNIEventListener eventPrinter;
	OpenNI::addListener(&eventPrinter);
	Device device;
	rc = device.open(ANY_DEVICE);
	if (rc != STATUS_OK)
	{
		printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
		OpenNI::shutdown();
		return 2;
	}
	VideoStream depth;
	if (device.getSensorInfo(SENSOR_DEPTH) == NULL)
	{
		// Bug fix: the original fell through and called start() on a stream
		// that was never created when the device had no depth sensor.
		printf("Device has no depth sensor\n");
		device.close();
		OpenNI::shutdown();
		return 3;
	}
	rc = depth.create(device, SENSOR_DEPTH);
	if (rc != STATUS_OK)
	{
		// Bug fix: fail fast instead of starting an uncreated stream.
		printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
		device.close();
		OpenNI::shutdown();
		return 3;
	}
	rc = depth.start();
	if (rc != STATUS_OK)
	{
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		depth.destroy();
		device.close();
		OpenNI::shutdown();
		return 4;
	}
	PrintCallback depthPrinter;
	// Register to new frame
	depth.addListener(&depthPrinter);
	// Wait while we're getting frames through the printer.
	// Sleep(100) is the sample-utility millisecond sleep.
	while (!wasKeyboardHit())
	{
		Sleep(100);
	}
	depth.removeListener(&depthPrinter);
	depth.stop();
	depth.destroy();
	device.close();
	OpenNI::shutdown();
	return 0;
}
示例2: kinect_init
int kinect_init()
{
Status rc = OpenNI::initialize();
if (rc != STATUS_OK)
{
printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
return 1;
}
rc = device.open(ANY_DEVICE);
if (rc != STATUS_OK)
{
printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
return 2;
}
if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
{
rc = depth.create(device, SENSOR_DEPTH);
if (rc != STATUS_OK)
{
printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
return 3;
}
const SensorInfo* sinfo = device.getSensorInfo(SENSOR_DEPTH);
const Array<VideoMode>& modes = sinfo->getSupportedVideoModes();
for (int i=0; i<modes.getSize(); i++) {
printf("%i: %ix%i, %i fps, %i format\n",
i,
modes[i].getResolutionX(),
modes[i].getResolutionY(),
modes[i].getFps(),
modes[i].getPixelFormat()
);
}
//rc = depth.setVideoMode(modes[0]); // 320x240, 30fps, format: 100
//rc = depth.setVideoMode(modes[4]); // 640x480, 30fps, format: 100
rc = depth.setVideoMode(modes[4]); // 640x480, 30fps, format: 100
if (rc != openni::STATUS_OK) {
printf("Failed to set depth resolution\n");
return -1;
}
}
rc = depth.start();
if (rc != STATUS_OK)
{
printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
return 4;
}
return 0;
}
示例3: endConfigure
virtual void endConfigure()
{
	// Restarts the stream after a reconfiguration cycle if it was running
	// before configuration began. On failure the stream is torn down and
	// recreated from scratch, up to max_trials times.
	if(was_running_)
	{
		Status rc = stream_.start();
		if(rc != STATUS_OK)
		{
			SensorType type = stream_.getSensorInfo().getSensorType();
			ROS_WARN_STREAM("Failed to restart stream '" << name_ << "' after configuration!");
			int max_trials = 1;
			for(int trials = 0; trials < max_trials && rc != STATUS_OK; ++trials)
			{
				ros::Duration(0.1).sleep();
				// Tear the stream down completely and rebuild it.
				stream_.removeNewFrameListener(this);
				stream_.destroy();
				stream_.create(device_, type);
				stream_.addNewFrameListener(this);
				//stream_.setVideoMode(default_mode_);
				rc = stream_.start();
				ROS_WARN_STREAM_COND(rc != STATUS_OK, "Recovery trial " << trials << " failed!");
			}
			ROS_ERROR_STREAM_COND(rc != STATUS_OK, "Failed to recover stream '" << name_ << "'! Restart required!");
			ROS_INFO_STREAM_COND(rc == STATUS_OK, "Recovered stream '" << name_ << "'.");
		}
		// Bug fix: in the original this check sat OUTSIDE the was_running_
		// block, where `rc` was out of scope (a compile error). running_ is
		// only set once the stream actually (re)started.
		if(rc == STATUS_OK)
		{
			running_ = true;
		}
	}
}
示例4: onSubscriptionChanged
virtual void onSubscriptionChanged(const image_transport::SingleSubscriberPublisher& topic)
{
	// Lazily start the stream on the first subscriber and stop it again when
	// no subscribers remain; running_ tracks the stream state.
	const bool has_subscribers = topic.getNumSubscribers() > 0;
	if (!has_subscribers)
	{
		stream_.stop();
		running_ = false;
		return;
	}
	if (!running_ && stream_.start() == STATUS_OK)
	{
		running_ = true;
	}
}
示例5: Size
int
main (int argc, char** argv)
{
	// Records frames from an OpenNI sensor (depth/color/IR, chosen by the
	// file-scope `currentSensor`) into "out.avi" via OpenCV, displaying each
	// frame, until any key is pressed in the display window.
	Status rc = OpenNI::initialize();
	if (rc != STATUS_OK)
	{
		std::cout << "Initialize failed: " << OpenNI::getExtendedError() << std::endl;
		return 1;
	}
	Device device;
	rc = device.open(ANY_DEVICE);
	if (rc != STATUS_OK)
	{
		std::cout << "Couldn't open device: " << OpenNI::getExtendedError() << std::endl;
		return 2;
	}
	VideoStream stream;
	// Bug fix: the original only created the stream when the sensor existed
	// but unconditionally started it afterwards; fail fast instead.
	if (device.getSensorInfo(currentSensor) == NULL)
	{
		std::cout << "Device has no sensor of the requested type" << std::endl;
		return 3;
	}
	rc = stream.create(device, currentSensor);
	if (rc != STATUS_OK)
	{
		std::cout << "Couldn't create stream: " << OpenNI::getExtendedError() << std::endl;
		return 3;
	}
	rc = stream.start();
	if (rc != STATUS_OK)
	{
		std::cout << "Couldn't start the stream: " << OpenNI::getExtendedError() << std::endl;
		return 4;
	}
	VideoFrameRef frame;
	// Open the video writer at the stream's native resolution and frame rate.
	// fourcc -1 pops up the codec-selection dialog on Windows; only the color
	// sensor yields 3-channel frames, so record in color only for SENSOR_COLOR.
	Size S = Size(stream.getVideoMode().getResolutionX(),
		stream.getVideoMode().getResolutionY());
	VideoWriter outputVideo;
	std::string fileName = "out.avi";
	outputVideo.open(fileName, -1, stream.getVideoMode().getFps(), S, currentSensor == SENSOR_COLOR);
	if (!outputVideo.isOpened())
	{
		std::cout << "Could not open the output video for write: " << fileName << std::endl;
		return -1;
	}
	while (waitKey(50) == -1)
	{
		int changedStreamDummy;
		VideoStream* pStream = &stream;
		rc = OpenNI::waitForAnyStream(&pStream, 1, &changedStreamDummy, SAMPLE_READ_WAIT_TIMEOUT);
		if (rc != STATUS_OK)
		{
			std::cout << "Wait failed! (timeout is " << SAMPLE_READ_WAIT_TIMEOUT << "ms): " << OpenNI::getExtendedError() << std::endl;
			continue;
		}
		rc = stream.readFrame(&frame);
		if (rc != STATUS_OK)
		{
			std::cout << "Read failed:" << OpenNI::getExtendedError() << std::endl;
			continue;
		}
		// Wrap the frame buffer in a cv::Mat without copying; the element
		// type depends on which sensor is being read.
		// NOTE(review): OpenNI color frames are RGB888 while OpenCV expects
		// BGR — recorded colors may be channel-swapped; verify.
		Mat image;
		switch (currentSensor)
		{
		case SENSOR_COLOR:
			image = Mat(frame.getHeight(), frame.getWidth(), CV_8UC3, (void*)frame.getData());
			break;
		case SENSOR_DEPTH:
			image = Mat(frame.getHeight(), frame.getWidth(), DataType<DepthPixel>::type, (void*)frame.getData());
			break;
		case SENSOR_IR:
			image = Mat(frame.getHeight(), frame.getWidth(), CV_8U, (void*)frame.getData());
			break;
		default:
			break;
		}
		// Robustness fix: with an unknown sensor type `image` stays empty and
		// imshow / VideoWriter would fail on it.
		if (image.empty())
		{
			continue;
		}
		namedWindow( "Display window", WINDOW_AUTOSIZE ); // Create a window for display.
		imshow( "Display window", image ); // Show our image inside it.
		outputVideo << image;
	}
	stream.stop();
	stream.destroy();
	device.close();
	OpenNI::shutdown();
	return 0;
}
示例6: main
int main()
{
// Grabs depth and color streams from an OpenNI device, shows them in OpenCV
// windows and (under F_RECORDVIDEO) records color to an AVI and depth frames
// to a YAML file. This listing is truncated in the source page.
// 2. initialize OpenNI
Status rc = OpenNI::initialize();
if (rc != STATUS_OK)
{
printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
return 1;
}
// 3. open a device
Device device;
rc = device.open(ANY_DEVICE);
if (rc != STATUS_OK)
{
printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
return 2;
}
// 4. create depth stream
// NOTE(review): when a sensor is absent, create() is skipped here but
// start() is still called on the uncreated stream below.
VideoStream depth;
if (device.getSensorInfo(SENSOR_DEPTH) != NULL){
rc = depth.create(device, SENSOR_DEPTH);
if (rc != STATUS_OK){
printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
return 3;
}
}
VideoStream color;
if (device.getSensorInfo(SENSOR_COLOR) != NULL){
rc = color.create(device, SENSOR_COLOR);
if (rc != STATUS_OK){
printf("Couldn't create color stream\n%s\n", OpenNI::getExtendedError());
return 4;
}
}
// 5. create OpenCV Window
cv::namedWindow("Depth Image", CV_WINDOW_AUTOSIZE);
cv::namedWindow("Color Image", CV_WINDOW_AUTOSIZE);
// 6. start
rc = depth.start();
if (rc != STATUS_OK)
{
printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
return 5;
}
rc = color.start();
if (rc != STATUS_OK){
// NOTE(review): the message says "depth" but it is the COLOR stream that
// failed to start here.
printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
return 6;
}
VideoFrameRef colorframe;
VideoFrameRef depthframe;
// Depth scale factor and color video parameters used by the recorder below.
int iMaxDepth = depth.getMaxPixelValue();
int iColorFps = color.getVideoMode().getFps();
cv::Size iColorFrameSize = cv::Size(color.getVideoMode().getResolutionX(), color.getVideoMode().getResolutionY());
cv::Mat colorimageRGB;
cv::Mat colorimageBGR;
cv::Mat depthimage;
cv::Mat depthimageScaled;
#ifdef F_RECORDVIDEO
cv::VideoWriter outputvideo_color;
cv::FileStorage outputfile_depth;
// Build timestamped output file names from the local time (Windows-only
// localtime_s variant).
time_t timenow = time(0);
tm ltime;
// NOTE(review): "<ime" is HTML-unescaping residue from the source page; the
// original code was `localtime_s(&ltime, &timenow);`.
localtime_s(<ime, &timenow);
int tyear = 1900 + ltime.tm_year;
int tmouth = 1 + ltime.tm_mon;
int tday = ltime.tm_mday;
int thour = ltime.tm_hour;
int tmin = ltime.tm_min;
int tsecond = ltime.tm_sec;
string filename_rgb = "RGB/rgb_" + to_string(tyear) + "_" + to_string(tmouth) + "_" + to_string(tday)
+ "_" + to_string(thour) + "_" + to_string(tmin) + "_" + to_string(tsecond) + ".avi";
string filename_d = "D/d_" + to_string(tyear) + "_" + to_string(tmouth) + "_" + to_string(tday)
+ "_" + to_string(thour) + "_" + to_string(tmin) + "_" + to_string(tsecond) + ".yml";
outputvideo_color.open(filename_rgb, CV_FOURCC('I', '4', '2', '0'), iColorFps, iColorFrameSize, true);
if (!outputvideo_color.isOpened()){
cout << "Could not open the output color video for write: " << endl;
return 7;
}
outputfile_depth.open(filename_d, cv::FileStorage::WRITE);
if (!outputfile_depth.isOpened()){
cout << "Could not open the output depth file for write: " << endl;
return 8;
}
#endif // F_RECORDVIDEO
// 7. main loop, continue read
//......... part of the code is omitted here (truncated in the source) .........
示例7: main
int main(int argc, char **argv){
// ROS driver node for an ASUS Xtion / OpenNI2 camera: reads launch-file
// parameters, enumerates OpenNI devices and opens the requested one either
// by URI prefix or by enumeration index. This listing is truncated in the
// source page.
// NOTE(review): topic, frame_id, the _xxx parameter variables, depth, rgb
// and streams are file-scope globals declared outside this excerpt.
printf("starting\n");
fflush(stdout);
ros::init(argc, argv, "xtion",ros::init_options::AnonymousName);
ros::NodeHandle n("~");
//Base topic name
n.param("topic", topic, string("/camera"));
//Resolution
//0 = 160x120
//1 = 320x240
n.param("depth_mode", _depth_mode, -1);
n.param("rgb_mode", _rgb_mode, -1);
n.param("sync", _sync, 0);
n.param("registration", _registration,0);
n.param("frame_id", frame_id, string("camera_frame"));
n.param("device_num", _device_num, -1);
n.param("device_uri", _device_uri, string("NA"));
n.param("frame_skip", _frame_skip, 0);
n.param("exposure", _exposure, -1);
n.param("gain", _gain, -1);
// Echo the effective configuration for debugging.
printf("Launched with params:\n");
printf("_device_num:= %d\n",_device_num);
printf("_device_uri:= %s\n",_device_uri.c_str());
printf("_topic:= %s\n",topic.c_str());
printf("_sync:= %d\n",_sync);
printf("_registration:= %d\n",_registration);
printf("_depth_mode:= %d\n",_depth_mode);
printf("_rgb_mode:= %d\n",_rgb_mode);
printf("_frame_id:= %s\n",frame_id.c_str());
printf("_frame_skip:= %d\n",_frame_skip);
printf("_exposure:= %d\n",_exposure);
printf("_gain:= %d\n",_gain);
fflush(stdout);
// A non-positive skip would publish nothing; clamp to "every frame".
if (_frame_skip<=0)
_frame_skip = 1;
//OPENNI2 STUFF
//===================================================================
// streams[0]/streams[1] alias the global depth/rgb VideoStreams so both can
// later be passed to OpenNI::waitForAnyStream().
streams = new openni::VideoStream*[2];
streams[0]=&depth;
streams[1]=&rgb;
Status rc = OpenNI::initialize();
if (rc != STATUS_OK)
{
printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
fflush(stdout);
return 1;
}
// enumerate the devices
openni::Array<openni::DeviceInfo> device_list;
openni::OpenNI::enumerateDevices(&device_list);
Device device;
// If a device URI (prefix) was supplied, open the first device whose URI
// starts with it; otherwise open by enumeration index.
if(_device_uri.compare("NA")){
string dev_uri("NA");
for (int i = 0; i<device_list.getSize(); i++){
if(!string(device_list[i].getUri()).compare(0, _device_uri.size(), _device_uri )){
dev_uri = device_list[i].getUri();
break;
}
}
if(!dev_uri.compare("NA")){
// NOTE(review): this is only a warning — device.open("NA") is still
// attempted below and will fail; an early return may be intended here.
cerr << "cannot find device with uri starting for: " << _device_uri << endl;
}
rc = device.open(dev_uri.c_str());
}
else{
// With no valid index, list all devices to help the user pick one.
if (_device_num < 0){
cerr << endl << endl << "found " << device_list.getSize() << " devices" << endl;
for (int i = 0; i<device_list.getSize(); i++)
cerr << "\t num: " << i << " uri: " << device_list[i].getUri() << endl;
}
if (_device_num>=device_list.getSize() || _device_num<0 ) {
cerr << "device num: " << _device_num << " does not exist, aborting" << endl;
openni::OpenNI::shutdown();
return 0;
}
rc = device.open(device_list[_device_num].getUri());
}
if (rc != STATUS_OK){
printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
fflush(stdout);
return 2;
//......... part of the code is omitted here (truncated in the source) .........
示例8: _tmain
int _tmain(int argc, _TCHAR* argv[])
{
// Console depth-blob tracker: captures one background depth frame, then in
// an endless loop segments foreground pixels against it and prints blob
// center points plus a timing figure every 64 frames. This listing is
// truncated in the source page.
DepthDetector detector(ThresholdMin, ThresholdMax);
ScanLineSegmenter segmenter;
OpenNI::initialize();
Device device;
if (device.open(ANY_DEVICE) != STATUS_OK)
{
std::cout << "could not open any device\r\n";
return 1;
}
// List the supported depth modes for reference.
if (device.hasSensor(SENSOR_DEPTH))
{
auto info = device.getSensorInfo(SENSOR_DEPTH);
auto& modes = info->getSupportedVideoModes();
std::cout << "depth sensor supported modes:\r\n";
for (int i = 0; i < modes.getSize(); ++i)
{
auto& mode = modes[i];
std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
}
}
// Configure a mirrored 320x240 @ 25 fps stream in 1mm depth units.
// NOTE(review): the Status results of create()/setVideoMode()/start() are
// never checked — any failure surfaces only as bad frames later.
VideoStream stream;
stream.create(device, SENSOR_DEPTH);
VideoMode mode;
mode.setFps(25);
mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
mode.setResolution(320, 240);
stream.setMirroringEnabled(true);
stream.setVideoMode(mode);
stream.start();
// Capture a single frame as the static background model.
std::cout << "press any key to capture background\r\n";
std::cin.get();
VideoFrameRef frame;
stream.readFrame(&frame);
DepthImage image(320, 240);
copyFrameToImage(frame, image);
detector.background(image);
std::cout << "starting capture loop\r\n";
CenterPointExtractor centerPointExtractor(MinBlobSize);
std::chrono::high_resolution_clock timer;
auto startTime = timer.now();
int frameId = 0;
while (true)
{
stream.readFrame(&frame);
copyFrameToImage(frame, image);
detector.detect(image);
// Scan-line segmentation of the foreground mask, then blob centers.
std::vector<LineSegment> segments;
segmenter.segment(detector.mask(), segments);
std::vector<std::pair<float, float>> centerPoints;
centerPointExtractor.extract(segments, centerPoints);
if (centerPoints.size())
{
std::cout << "point count: " << centerPoints.size();
std::cout << "\t points: ";
for (auto& point : centerPoints)
{
std::cout << "(" << point.first << ", " << point.second << ") ";
}
std::cout << "\r\n";
}
++frameId;
// Report timing once every 64 frames.
if (frameId % 64 == 0)
{
auto stopTime = timer.now();
auto elapsedTime = stopTime - startTime;
auto elapsedMilliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(elapsedTime).count();
// NOTE(review): the printed value is milliseconds-per-frame, not fps;
// fps would be 64 * 1000 / elapsedMilliseconds.
std::cout << "\t total frames: " << frameId << "\t fps: " << elapsedMilliseconds / 64 << std::endl;
startTime = stopTime;
}
}
openni::OpenNI::shutdown();
//......... part of the code is omitted here (truncated in the source) .........
示例9: _tmain
int _tmain(int argc, _TCHAR* argv[])
{
	// Depth-based motion tracker with an SDL/OpenGL viewer: captures one
	// background depth frame, then continuously segments foreground blobs,
	// extracts their center points, feeds them to a MotionRecorder and draws
	// mask / live depth / background / difference plus the tracked paths.
	sdl::Application app;
	DepthDetector detector(ThresholdMin, ThresholdMax);
	ScanLineSegmenter segmenter;
	OpenNI::initialize();
	Device device;
	if (device.open(ANY_DEVICE) != STATUS_OK)
	{
		std::cout << "could not open any device\r\n";
		return 1;
	}
	// List the supported depth modes for reference.
	if (device.hasSensor(SENSOR_DEPTH))
	{
		auto info = device.getSensorInfo(SENSOR_DEPTH);
		auto& modes = info->getSupportedVideoModes();
		std::cout << "depth sensor supported modes:\r\n";
		for (int i = 0; i < modes.getSize(); ++i)
		{
			auto& mode = modes[i];
			std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
		}
	}
	VideoStream stream;
	// Bug fix: the original ignored the Status results of create(),
	// setVideoMode() and start(); a failure there previously meant reading
	// from a dead stream forever.
	if (stream.create(device, SENSOR_DEPTH) != STATUS_OK)
	{
		std::cout << "could not create depth stream\r\n";
		return 2;
	}
	// Mirrored 320x240 @ 25 fps in 1mm depth units.
	VideoMode mode;
	mode.setFps(25);
	mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
	mode.setResolution(320, 240);
	stream.setMirroringEnabled(true);
	if (stream.setVideoMode(mode) != STATUS_OK)
	{
		std::cout << "could not set requested video mode\r\n";
		return 2;
	}
	if (stream.start() != STATUS_OK)
	{
		std::cout << "could not start depth stream\r\n";
		return 2;
	}
	// Capture one frame as the static background model.
	std::cout << "press any key to capture background\r\n";
	std::cin.get();
	VideoFrameRef frame;
	stream.readFrame(&frame);
	DepthImage image(320, 240);
	copyFrameToImage(frame, image);
	detector.background(image);
	std::cout << "starting capture loop\r\n";
	// Four 320x240 viewer panes: mask, live depth, background, difference.
	sdl::GLContext::setVersion(4, 3);
	ImageViewer viewer;
	viewer.add(0, 0, 320, 240);
	viewer.add(320, 0, 320, 240);
	viewer.add(0, 240, 320, 240);
	viewer.add(320, 240, 320, 240);
	CenterPointExtractor centerPointExtractor(MinBlobSize);
	MotionRecorder recorder;
	// Runs until the process is terminated; the shutdown below is unreachable.
	while (true)
	{
		stream.readFrame(&frame);
		copyFrameToImage(frame, image);
		detector.detect(image);
		std::vector<LineSegment> segments;
		segmenter.segment(detector.mask(), segments);
		std::vector<std::pair<float, float>> centerPoints;
		centerPointExtractor.extract(segments, centerPoints);
		recorder.track(centerPoints);
		// Refresh the overlay geometry from the latest detections.
		viewer.crosses.clear();
		std::transform(begin(centerPoints), end(centerPoints), std::back_inserter(viewer.crosses), [](std::pair<float, float>& coord) {
			return Cross{ coord.first, coord.second };
		});
		viewer.lines.clear();
		std::transform(begin(recorder.motions()), end(recorder.motions()), std::back_inserter(viewer.lines), [](const Motion& motion) {
			return Lines{ motion.points };
		});
		viewer[0].update(detector.mask());
		viewer[1].update(image);
		viewer[2].update(detector.background());
		viewer[3].update(detector.difference());
		viewer.update();
	}
	openni::OpenNI::shutdown();
	return 0;
}
示例10: main
//......... part of the code is omitted here (the enclosing function's
// beginning is truncated in the source) .........
// Sets up synchronized 640x480@30fps depth (100um units) and color streams,
// then loops showing both while building a background model.
Device devAnyDevice;
devAnyDevice.open(ANY_DEVICE);
//----------------[Define Video Settings]-------------------
//Set Properties of Depth Stream
VideoMode mModeDepth;
mModeDepth.setResolution( 640, 480 );
mModeDepth.setFps( 30 );
// 100um depth units (PIXEL_FORMAT_DEPTH_100_UM), not the more common 1mm.
mModeDepth.setPixelFormat( PIXEL_FORMAT_DEPTH_100_UM );
//Set Properties of Color Stream
VideoMode mModeColor;
mModeColor.setResolution( 640, 480 );
mModeColor.setFps( 30 );
mModeColor.setPixelFormat( PIXEL_FORMAT_RGB888 );
//----------------------------------------------------------
//----------------------[Initial Streams]---------------------
// NOTE(review): open()/create()/setVideoMode()/start() return Status codes
// that are never checked in this excerpt.
VideoStream streamInitDepth;
streamInitDepth.create( devAnyDevice, SENSOR_DEPTH );
VideoStream streamInitColor;
streamInitColor.create( devAnyDevice, SENSOR_COLOR );
streamInitDepth.setVideoMode( mModeDepth );
streamInitColor.setVideoMode( mModeColor );
namedWindow( "Depth Image (Init)", CV_WINDOW_AUTOSIZE );
namedWindow( "Color Image (Init)", CV_WINDOW_AUTOSIZE );
//namedWindow( "Thresholded Image (Init)", CV_WINDOW_AUTOSIZE );
VideoFrameRef frameDepthInit;
VideoFrameRef frameColorInit;
streamInitDepth.start();
streamInitColor.start();
cv::Mat BackgroundFrame;
int avgDist = 0;
// Used to scale 16-bit depth into 8-bit for display.
int iMaxDepthInit = streamInitDepth.getMaxPixelValue();
// OutX/OutY are presumably file-scope point buffers declared outside this
// excerpt — TODO confirm against the full source.
OutX.clear();
OutY.clear();
vector<int> OldOutX, OldOutY;
OldOutX.clear();
OldOutY.clear();
//------------------------------------------------------------
//--------------------[Initiation Process]--------------------
while( true )
{
streamInitDepth.readFrame( &frameDepthInit );
streamInitColor.readFrame( &frameColorInit );
// Wrap the OpenNI buffers in cv::Mats without copying, scale depth to 8-bit
// for display, and convert the RGB color frame to OpenCV's BGR order.
const cv::Mat mImageDepth( frameDepthInit.getHeight(), frameDepthInit.getWidth(), CV_16UC1, (void*)frameDepthInit.getData());
cv::Mat mScaledDepth;
mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepthInit );
cv::imshow( "Depth Image (Init)", mScaledDepth );
const cv::Mat mImageRGB(frameColorInit.getHeight(), frameColorInit.getWidth(), CV_8UC3, (void*)frameColorInit.getData());
cv::Mat cImageBGR;
cv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );
//--------------------[Get Average Distance]---------------------
示例11: initializeOpenNIDevice
int initializeOpenNIDevice(int deviceID ,const char * deviceName , Device &device , VideoStream &color , VideoStream &depth ,unsigned int width ,unsigned int height , unsigned int fps)
{
// Opens an OpenNI device either from a string (an .oni recording or a device
// URI, via deviceName) or by enumeration index (deviceID), then creates,
// configures and starts its depth and color streams at the requested
// width/height/fps. Returns 0 on any failure; the success path is truncated
// in the source page.
unsigned int openMode=OPENNI2_OPEN_REGULAR_ENUM; /* 0 = regular deviceID and enumeration*/
if (deviceName!=0)
{
//If our deviceName contains a .oni we assume that we have an oni file to open
if (strstr(deviceName,".oni")!=0)
{
fprintf(stderr,"Found an .ONI filename , trying to open it..\n");
openMode=OPENNI2_OPEN_USING_STRING;
} else
// Long names are assumed to be device URIs rather than plain IDs.
if (strlen(deviceName)>7)
{
fprintf(stderr,"deviceName is too long (%lu chars) , assuming it is a Device URI ..\n",strlen(deviceName));
openMode=OPENNI2_OPEN_USING_STRING;
}
}
switch (openMode)
{
//-------------------------------------------------------------------------------------
//If we have an ONI file to open just pass it as an argument to device.open(deviceName)
case OPENNI2_OPEN_USING_STRING :
if (device.open(deviceName) != STATUS_OK)
{
fprintf(stderr,"Could not open using given string ( %s ) : %s \n",deviceName,OpenNI::getExtendedError());
return 0;
}
break;
//-------------------------------------------------------------------------------------
//If we don't have a deviceName we assume deviceID points to the device we want to open so we will try to use
//the openNI enumerator to get the specific device URI for device with number deviceID and use this to device.open( devURI )
case OPENNI2_OPEN_REGULAR_ENUM :
default :
//We have to supply our own buffer to hold the uri device string , so we make one here
char devURIBuffer[512]={0};
if (device.open(getURIForDeviceNumber(deviceID,devURIBuffer,512)) != STATUS_OK)
{
fprintf(stderr,"Could not open an OpenNI device : %s \n",OpenNI::getExtendedError());
return 0;
}
break;
}
if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
{
Status rc = depth.create(device, SENSOR_DEPTH);
if (rc == STATUS_OK)
{
// Request the caller-supplied resolution and frame rate on depth.
VideoMode depthMode = depth.getVideoMode();
depthMode.setResolution(width,height);
depthMode.setFps(fps);
// NOTE(review): this inner `rc` shadows the outer one, and the error
// message should say "depth", not "color".
Status rc = depth.setVideoMode(depthMode);
if (rc != STATUS_OK) { fprintf(stderr,"Error getting color at video mode requested %u x %u @ %u fps\n%s\n",width,height,fps,OpenNI::getExtendedError()); }
// NOTE(review): message should say "depth stream".
if(depth.start()!= STATUS_OK)
{
fprintf(stderr,"Couldn't start the color stream: %s \n",OpenNI::getExtendedError());
return 0;
}
}
else
{
fprintf(stderr,"Couldn't create depth stream: %s \n",OpenNI::getExtendedError());
return 0;
}
}
if (device.getSensorInfo(SENSOR_COLOR) != NULL)
{
Status rc = color.create(device, SENSOR_COLOR);
if (rc == STATUS_OK)
{
// Request the caller-supplied resolution and frame rate on color.
VideoMode colorMode = color.getVideoMode();
colorMode.setResolution(width,height);
colorMode.setFps(fps);
// NOTE(review): the error message should say "color", not "depth".
Status rc = color.setVideoMode(colorMode);
if (rc != STATUS_OK) { fprintf(stderr,"Error getting depth at video mode requested %u x %u @ %u fps\n%s\n",width,height,fps,OpenNI::getExtendedError()); }
if(color.start() != STATUS_OK)
{
fprintf(stderr,"Couldn't start the color stream: %s \n",OpenNI::getExtendedError());
return 0;
}
}
else
{
// NOTE(review): message should say "color stream"; the bare
// getExtendedError() call below has no effect.
fprintf(stderr,"Couldn't create depth stream: %s \n",OpenNI::getExtendedError());
OpenNI::getExtendedError();
return 0;
}
}
#if MOD_IR
// `ir` is presumably a file-scope VideoStream declared outside this excerpt
// — TODO confirm against the full source.
if(device.getSensorInfo(SENSOR_IR) != NULL)
{
Status rc = ir.create(device, SENSOR_IR); // Create the VideoStream for IR
if (rc == STATUS_OK)
//......... part of the code is omitted here (truncated in the source) .........
示例12: main
int main()
{
	// Shows live depth and color streams from an OpenNI device in two OpenCV
	// windows until a key is hit. Returns 0 on success, non-zero on error.
	// 2. initialize OpenNI
	Status rc = OpenNI::initialize();
	if (rc != STATUS_OK)
	{
		printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
		return 1;
	}
	// 3. open a device
	Device device;
	rc = device.open(ANY_DEVICE);
	if (rc != STATUS_OK)
	{
		printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
		return 2;
	}
	// 4. create depth and color streams. Bug fix: the original silently
	// skipped create() when a sensor was missing but still called start()
	// on the uncreated stream later; fail fast instead.
	VideoStream depth;
	if (device.getSensorInfo(SENSOR_DEPTH) == NULL){
		printf("Device has no depth sensor\n");
		return 3;
	}
	rc = depth.create(device, SENSOR_DEPTH);
	if (rc != STATUS_OK){
		printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
		return 3;
	}
	VideoStream color;
	if (device.getSensorInfo(SENSOR_COLOR) == NULL){
		printf("Device has no color sensor\n");
		return 4;
	}
	rc = color.create(device, SENSOR_COLOR);
	if (rc != STATUS_OK){
		printf("Couldn't create color stream\n%s\n", OpenNI::getExtendedError());
		return 4;
	}
	// 5. create OpenCV Windows
	cv::namedWindow("Depth Image", CV_WINDOW_AUTOSIZE);
	cv::namedWindow("Color Image", CV_WINDOW_AUTOSIZE);
	// 6. start both streams
	rc = depth.start();
	if (rc != STATUS_OK)
	{
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 5;
	}
	rc = color.start();
	if (rc != STATUS_OK){
		// Bug fix: the original message said "depth" here.
		printf("Couldn't start the color stream\n%s\n", OpenNI::getExtendedError());
		return 6;
	}
	VideoFrameRef colorframe;
	VideoFrameRef depthframe;
	// Used to scale 16-bit depth values into a displayable 8-bit range.
	int iMaxDepth = depth.getMaxPixelValue();
	cv::Mat colorimageRGB;
	cv::Mat colorimageBGR;
	cv::Mat depthimage;
	cv::Mat depthimageScaled;
	// 7. main loop, continue reading until a key is hit
	while (!wasKeyboardHit())
	{
		// 8. read the color frame if available; OpenNI delivers RGB while
		// OpenCV displays BGR, hence the conversion.
		if (color.isValid()){
			if (color.readFrame(&colorframe) == STATUS_OK){
				colorimageRGB = { colorframe.getHeight(), colorframe.getWidth(), CV_8UC3, (void*)colorframe.getData() };
				cv::cvtColor(colorimageRGB, colorimageBGR, CV_RGB2BGR);
			}
		}
		// 9. read the depth frame if available and scale it for display
		if (depth.isValid()){
			if (depth.readFrame(&depthframe) == STATUS_OK){
				depthimage = { depthframe.getHeight(), depthframe.getWidth(), CV_16UC1, (void*)depthframe.getData() };
				depthimage.convertTo(depthimageScaled, CV_8U, 255.0 / iMaxDepth);
			}
		}
		// Robustness fix: imshow on an empty Mat (no frame read yet) throws.
		if (!colorimageBGR.empty())
			cv::imshow("Color Image", colorimageBGR);
		if (!depthimageScaled.empty())
			cv::imshow("Depth Image", depthimageScaled);
		cv::waitKey(10);
	}
	color.stop();
	depth.stop();
	color.destroy();
	depth.destroy();
	device.close();
	OpenNI::shutdown();
	return 0;
}
示例13: main
int main()
{
	// OpenNI event sample: prints device connect/disconnect/state events,
	// starts the depth stream with a per-frame print callback, and echoes
	// integers typed by the user until a non-positive value is entered.
	Status rc = OpenNI::initialize();
	if (rc != STATUS_OK)
	{
		printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
		return 1;
	}
	OpenNIDeviceListener devicePrinter;
	OpenNI::addDeviceConnectedListener(&devicePrinter);
	OpenNI::addDeviceDisconnectedListener(&devicePrinter);
	OpenNI::addDeviceStateChangedListener(&devicePrinter);
	// Report devices that were already attached before the listeners ran.
	openni::Array<openni::DeviceInfo> deviceList;
	openni::OpenNI::enumerateDevices(&deviceList);
	for (int i = 0; i < deviceList.getSize(); ++i)
	{
		printf("Device \"%s\" already connected\n", deviceList[i].getUri());
	}
	Device device;
	rc = device.open(ANY_DEVICE);
	if (rc != STATUS_OK)
	{
		printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
		return 2;
	}
	VideoStream depth;
	if (device.getSensorInfo(SENSOR_DEPTH) == NULL)
	{
		// Bug fix: the original fell through and called start() on a stream
		// that was never created when the device had no depth sensor.
		printf("Device has no depth sensor\n");
		return 3;
	}
	rc = depth.create(device, SENSOR_DEPTH);
	if (rc != STATUS_OK)
	{
		printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
		return 3;
	}
	rc = depth.start();
	if (rc != STATUS_OK)
	{
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 4;
	}
	PrintCallback depthPrinter;
	// Register to new frame
	depth.addNewFrameListener(&depthPrinter);
	// Echo integers from stdin; a non-positive value ends the loop.
	int i = 1;
	while (i > 0)
	{
		// Bug fix: scanf_s requires the ADDRESS of the target variable; the
		// original passed `i` by value (undefined behavior). Also stop on a
		// failed read so non-numeric input cannot spin forever.
		if (scanf_s("%d", &i) != 1)
			break;
		printf("%d\n", i);
	}
	depth.removeNewFrameListener(&depthPrinter);
	depth.stop();
	depth.destroy();
	device.close();
	OpenNI::shutdown();
	return 0;
}