本文整理汇总了C++中openni::VideoStream::getVideoMode方法的典型用法代码示例。如果您正苦于以下问题:C++ VideoStream::getVideoMode方法的具体用法?C++ VideoStream::getVideoMode怎么用?C++ VideoStream::getVideoMode使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类openni::VideoStream
的用法示例。
在下文中一共展示了VideoStream::getVideoMode方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: initONI2RGBStream
/// Creates, configures and starts the RGB (color) stream on @p device.
/// The stream is created, mirroring is disabled, the desired mode
/// (@p w x @p h @ @p fps, @p format) is applied, and the stream is started.
/// @return true on success. On any failure an error is printed and the
///         stream created here is destroyed, so no handle leaks to the caller.
bool initONI2RGBStream(openni::Device& device, openni::VideoStream& rgb, int w, int h, int fps, openni::PixelFormat format){
    openni::Status rc = openni::STATUS_OK;
    rc = rgb.create(device, openni::SENSOR_COLOR);
    if(rc != openni::STATUS_OK){
        printf("%s:Couldn't find RGB stream:\n%s\n", __FUNCTION__, openni::OpenNI::getExtendedError());
        return false;
    }
    rc = rgb.setMirroringEnabled(false);
    if (rc != openni::STATUS_OK){
        printf("%s:setMirroringEnabled(false) failed:\n%s\n", __FUNCTION__, openni::OpenNI::getExtendedError());
        rgb.destroy();  // fix: release the stream created above (was leaked on this path)
        return false;
    }
    openni::VideoMode options = rgb.getVideoMode();
    printf("Initial resolution RGB (%d, %d) FPS %d Format %d\n", options.getResolutionX(), options.getResolutionY(), options.getFps(), options.getPixelFormat());
    if(setONI2StreamMode(rgb, w, h, fps, format) == false){
        printf("%s:Can't find desired rgb mode\n", __FUNCTION__ );
        rgb.destroy();  // fix: release the stream created above (was leaked on this path)
        return false;
    }
    options = rgb.getVideoMode();
    printf(" -> (%d, %d) FPS %d Format %d\n", options.getResolutionX(), options.getResolutionY(), options.getFps(), options.getPixelFormat());
    rc = rgb.start();
    if (rc != openni::STATUS_OK){
        printf("%s:Couldn't start RGB stream:\n%s\n", __FUNCTION__, openni::OpenNI::getExtendedError());
        rgb.destroy();
        return false;
    }
    return true;
}
示例2: initONI2Stream
/// Creates a stream of the requested @p sensorType (color or depth) on
/// @p device and applies the desired mode (@p w x @p h @ @p fps, @p format).
/// The stream is created and configured but NOT started; the caller is
/// responsible for starting it.
/// @return true on success. On failure an error is printed and any stream
///         created here is destroyed, so no handle leaks to the caller.
bool initONI2Stream(openni::Device& device, openni::SensorType sensorType, openni::VideoStream& stream, int w, int h, int fps, openni::PixelFormat format){
    openni::Status rc = openni::STATUS_OK;
    // Human-readable sensor name for the diagnostic messages below.
    const char* strSensor;
    if(sensorType == openni::SENSOR_COLOR){
        strSensor = "openni::SENSOR_COLOR";
    }else if(sensorType == openni::SENSOR_DEPTH){
        strSensor = "openni::SENSOR_DEPTH";
    }else{
        printf("%s:Unknown SensorType -> %d\n", __FUNCTION__, sensorType);
        return false;
    }
    rc = stream.create(device, sensorType);
    if(rc != openni::STATUS_OK){
        printf("%s:Couldn't find sensor %s: %s\n", __FUNCTION__, strSensor, openni::OpenNI::getExtendedError());
        return false;
    }
    openni::VideoMode options = stream.getVideoMode();
    printf("%s:Initial resolution %s (%d, %d) FPS %d Format %d\n", __FUNCTION__, strSensor, options.getResolutionX(), options.getResolutionY(), options.getFps(), options.getPixelFormat());
    if(setONI2StreamMode(stream, w, h, fps, format) == false){
        printf("%s:Can't find desired mode in the %s\n", __FUNCTION__, strSensor);
        stream.destroy();  // fix: release the stream created above (was leaked on this path)
        return false;
    }
    options = stream.getVideoMode();
    printf(" -> (%d, %d) FPS %d Format %d\n", options.getResolutionX(), options.getResolutionY(), options.getFps(), options.getPixelFormat());
    return true;
}
示例3: initONI2DepthStream
/// Creates, configures and starts the depth stream on @p device, applying
/// the desired mode (@p w x @p h @ @p fps, @p format) and disabling mirroring.
/// @return true on success. On failure an error is printed and the stream
///         created here is destroyed, so no handle leaks to the caller.
bool initONI2DepthStream(openni::Device& device, openni::VideoStream& depth, int w, int h, int fps, openni::PixelFormat format){
    openni::Status rc = depth.create(device, openni::SENSOR_DEPTH);
    if (rc != openni::STATUS_OK){
        printf("%s:Couldn't find depth stream:\n%s\n", __FUNCTION__, openni::OpenNI::getExtendedError());
        return false;
    }
    openni::VideoMode options = depth.getVideoMode();
    printf("Initial resolution Depth(%d, %d) FPS %d Format %d\n", options.getResolutionX(), options.getResolutionY(), options.getFps(), options.getPixelFormat());
    if(setONI2StreamMode(depth, w, h, fps, format) == false){
        printf("%s:Can't find desired depth mode\n", __FUNCTION__ );
        depth.destroy();  // fix: release the stream created above (was leaked on this path)
        return false;
    }
    options = depth.getVideoMode();
    printf(" -> (%d, %d) FPS %d Format %d\n", options.getResolutionX(), options.getResolutionY(), options.getFps(), options.getPixelFormat());
    // fix: status was silently ignored. Kept non-fatal (mirroring may be
    // unsupported, e.g. when replaying a recording), but surface the failure.
    rc = depth.setMirroringEnabled(false);
    if (rc != openni::STATUS_OK){
        printf("%s:setMirroringEnabled(false) failed (continuing):\n%s\n", __FUNCTION__, openni::OpenNI::getExtendedError());
    }
    rc = depth.start();
    if (rc != openni::STATUS_OK){
        printf("Couldn't start depth stream:\n%s\n", openni::OpenNI::getExtendedError());
        depth.destroy();
        return false;
    }
    return true;
}
示例4: onNewFrame
// Frame callback: copies the newly arrived frame into the local image buffer
// under the listener mutex, updating the cached format/size metadata.
void streamFrameListener::onNewFrame(openni::VideoStream& stream)
{
    LockGuard guard(mutex);
    stream.readFrame(&frameRef);

    // Drop frames that arrived without valid pixel data.
    const bool frameUsable = frameRef.isValid() && frameRef.getData();
    if (!frameUsable)
    {
        yInfo() << "frame lost";
        return;
    }

    // Cache the stream/frame metadata for consumers.
    pixF = stream.getVideoMode().getPixelFormat();
    const int pixelCode = depthCameraDriver::pixFormatToCode(pixF);
    w        = frameRef.getWidth();
    h        = frameRef.getHeight();
    dataSize = frameRef.getDataSize();

    // First successful frame marks the listener as ready.
    if (!isReady)
    {
        isReady = true;
    }

    if (pixelCode == VOCAB_PIXEL_INVALID)
    {
        yError() << "depthCameraDriver: Pixel Format not recognized";
        return;
    }

    image.setPixelCode(pixelCode);
    image.resize(w, h);

    // Guard the memcpy: local buffer and device frame must agree on size.
    if (image.getRawImageSize() != frameRef.getDataSize())
    {
        yError() << "depthCameraDriver:device and local copy data size doesn't match";
        return;
    }

    memcpy((void*)image.getRawImage(), (void*)frameRef.getData(), frameRef.getDataSize());
    stamp.update();
}
示例5: drawCropStream
// Maps the on-screen rectangle @p selection (expressed inside @p location)
// into the stream's pixel coordinates and applies it as a cropping window.
// The crop origin/width are rounded so they are multiples of @p dividedBy.
void drawCropStream(openni::VideoStream& stream, IntRect location, IntRect selection, int dividedBy)
{
    if (!stream.isCroppingSupported())
    {
        return;
    }

    const openni::VideoMode videoMode = stream.getVideoMode();

    // Only act when the selection lies entirely within the location rectangle.
    const bool selectionInside =
        selection.uLeft   >= location.uLeft   &&
        selection.uRight  <= location.uRight  &&
        selection.uBottom >= location.uBottom &&
        selection.uTop    <= location.uTop;
    if (!selectionInside)
    {
        return;
    }

    // Scale each screen-space edge independently into stream pixels
    // (integer division per edge, matching the original rounding).
    const int spanX = location.uRight - location.uLeft;
    const int spanY = location.uTop - location.uBottom;
    const int leftPx   = videoMode.getResolutionX() * (selection.uLeft   - location.uLeft)   / spanX;
    const int rightPx  = videoMode.getResolutionX() * (selection.uRight  - location.uLeft)   / spanX;
    const int bottomPx = videoMode.getResolutionY() * (selection.uBottom - location.uBottom) / spanY;
    const int topPx    = videoMode.getResolutionY() * (selection.uTop    - location.uBottom) / spanY;

    int originX = leftPx;
    int originY = bottomPx;
    int width   = rightPx - leftPx;
    int height  = topPx - bottomPx;

    // Snap the horizontal origin down and the width up to dividedBy multiples.
    if ((originX % dividedBy) != 0)
        originX -= (originX % dividedBy);
    if ((width % dividedBy) != 0)
        width += dividedBy - (width % dividedBy);

    setStreamCropping(stream, originX, originY, width, height);
}
示例6: initialize
int SensorOpenNI::initialize()
{
LOG(INFO) << "Initializing OpenNI";
///< force shutdown before starting!!
kinect::OpenNI::shutdown();
kinect::Status rc;
rc = kinect::STATUS_OK;
/// Fetch the device URI to pass to Device::open()
const char* deviceURI = kinect::ANY_DEVICE;
/// Initialize the device
rc = kinect::OpenNI::initialize();
if(rc!=kinect::STATUS_OK)
{
mDebug()<<"Initialization Errors (if any): "<< kinect::OpenNI::getExtendedError();
kinect::OpenNI::shutdown();
exit(0);
}
/// Open the device using the previously fetched device URI
rc = device.open(deviceURI);
if (rc != kinect::STATUS_OK)
{
mDebug()<<"Device open failed: "<<kinect::OpenNI::getExtendedError();
kinect::OpenNI::shutdown();
exit(0);
}
/// Create the depth stream
rc = g_depthStream.create(device, kinect::SENSOR_DEPTH);
if (rc == kinect::STATUS_OK)
{
/// start the depth stream, if its creation was successful
rc = g_depthStream.start();
if (rc != kinect::STATUS_OK)
{
mDebug()<<"Couldn't start depth stream: "<<kinect::OpenNI::getExtendedError();
g_depthStream.destroy();
exit(0);
}
}
else
{
mDebug()<<"Couldn't find depth stream: "<<kinect::OpenNI::getExtendedError();
exit(0);
}
if (!g_depthStream.isValid())
{
mDebug()<<"No valid depth streams. Exiting";
kinect::OpenNI::shutdown();
exit(0);
}
/// Create the color stream
rc = g_colorStream.create(device, kinect::SENSOR_COLOR);
if (rc == kinect::STATUS_OK)
{
/// start the color stream, if its creation was successful
rc = g_colorStream.start();
if (rc != kinect::STATUS_OK)
{
mDebug()<<"Couldn't start color stream: "<<kinect::OpenNI::getExtendedError();
g_colorStream.destroy();
exit(0);
}
}
else
{
mDebug()<<"Couldn't find color stream: "<<kinect::OpenNI::getExtendedError();
exit(0);
}
if (!g_colorStream.isValid())
{
mDebug()<<"No valid color streams. Exiting";
kinect::OpenNI::shutdown();
exit(0);
}
/// Configure resolutions
{
/// Attempt to set for depth
{
kinect::VideoMode mode = g_depthStream.getVideoMode();
if(((int)camera->FPS())==60)
mode.setFps(60);
else
mode.setFps(30);
mode.setResolution(camera->width(), camera->height());
rc = g_depthStream.setVideoMode(mode);
if (rc != kinect::STATUS_OK)
std::cerr << "error setting video mode (depth)" << std::endl;
}
/// Attempt to set for color
//.........这里部分代码省略.........
示例7: tan
float w = g_depthStream.getVideoMode().getResolutionX();protected:
Camera*const camera;
/// Device
kinect::Device device;
bool initialized;
/// Streams
kinect::VideoStream g_depthStream;
kinect::VideoStream g_colorStream;
/// Frames
kinect::VideoFrameRef g_depthFrame;
kinect::VideoFrameRef g_colorFrame;
float fov_h = g_depthStream.getHorizontalFieldOfView();
float fl_h = .5*w / tan(.5*fov_h);
float h = g_depthStream.getVideoMode().getResolutionY();
float fov_v = g_depthStream.getVerticalFieldOfView();
float fl_v = .5*h / tan(.5*fov_v);
std::cout << "cameras focal lengths" << fl_h << fl_v;
#endif
initialized = true;
return true;
}
SensorOpenNI::~SensorOpenNI()
{
if(initialized){
LOG(INFO) << "Shutting down Kinect...";
flush(std::cout);
g_depthStream.destroy();
示例8: getFrameWidth
int getFrameWidth() const{ return m_stream.getVideoMode().getResolutionX(); }