本文整理汇总了C++中KFusion类的典型用法代码示例。如果您正苦于以下问题:C++ KFusion类的具体用法?C++ KFusion怎么用?C++ KFusion使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了KFusion类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
/**
 * Entry point for the Kinect-based KFusion demo.
 * CLI: kinect [volume_size_in_meters] [dist_threshold] [normal_threshold]
 * Sets up the reconstruction configuration, the GLUT window, the KFusion
 * pipeline and the Kinect capture, then enters the GLUT main loop.
 */
int main(int argc, char ** argv) {
    const float size = (argc > 1) ? atof(argv[1]) : 2.f;

    KFusionConfig config;

    // It is enough now to set the volume resolution once;
    // everything else is derived from that.
    // config.volumeSize = make_uint3(64);
    config.volumeSize = make_uint3(128);
    // config.volumeSize = make_uint3(256);

    // These are physical dimensions in meters.
    config.volumeDimensions = make_float3(size);
    config.nearPlane = 0.4f;
    config.farPlane = 5.0f;
    config.mu = 0.1f;                // fix: float literal (was the double literal 0.1)
    config.combinedTrackAndReduce = false;

    // Change the following parameters for using 640 x 480 input images.
    config.inputSize = make_uint2(320, 240);
    config.camera = make_float4(297.12732, 296.24240, 169.89365, 121.25151);

    // config.iterations is a vector<int>; the length determines the number
    // of levels to be used in tracking. Push back more than 3 iteration
    // numbers to get more levels.
    config.iterations[0] = 10;
    config.iterations[1] = 5;
    config.iterations[2] = 4;

    config.dist_threshold = (argc > 2) ? atof(argv[2]) : config.dist_threshold;
    config.normal_threshold = (argc > 3) ? atof(argv[3]) : config.normal_threshold;

    // Start the camera centered in x/y at the front face of the volume.
    initPose = SE3<float>(makeVector(size / 2, size / 2, 0, 0, 0, 0));

    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE);
    glutInitWindowSize(config.inputSize.x * 2, config.inputSize.y * 2);
    glutCreateWindow("kfusion");

    kfusion.Init(config);
    if (printCUDAError())
        exit(1);

    kfusion.setPose(toMatrix4(initPose));

    // Render buffers at tracking resolution (one statement each, instead of
    // the original comma-operator chain).
    lightScene.alloc(config.inputSize);
    depth.alloc(config.inputSize);
    lightModel.alloc(config.inputSize);

    // The Kinect delivers full-resolution 640x480 depth frames.
    depthImage.alloc(make_uint2(640, 480));
    if (InitKinect(depthImage.data()))
        exit(1);

    atexit(exitFunc);
    glutDisplayFunc(display);
    glutKeyboardFunc(keys);
    glutReshapeFunc(reshape);
    glutIdleFunc(idle);
    glutMainLoop();   // classic GLUT: never returns; cleanup runs via atexit(exitFunc)
    return 0;
}
示例2: main
// Entry point for the OpenNI2/RGB-D KFusion demo.
// CLI: kinect [size] [dist_threshold] [normal_threshold]
// NOTE: this example is truncated by the source aggregator -- the tail of
// the function (after rgbdDevice->open() fails) is not shown here.
int main(int argc, char ** argv) {
const float default_size = 2.f;
KFusionConfig config;
// Search for --help argument
for (int i = 0; i < argc; ++i) {
if (std::string(argv[i]) == "--help") {
std::cout << "Usage: kinect [size] [dist_threshold] [normal_threshold]" << std::endl;
std::cout << std::endl;
std::cout << "Defaults:" << std::endl;
std::cout << "  size: " << default_size << std::endl;
std::cout << "  dist_threshold: " << config.dist_threshold << std::endl;
std::cout << "  normal_threshold: " << config.normal_threshold << std::endl;
return 0;
}
}
const float size = (argc > 1) ? atof(argv[1]) : default_size;
// it is enough now to set the volume resolution once.
// everything else is derived from that.
// config.volumeSize = make_uint3(64);
// config.volumeSize = make_uint3(128);
// config.volumeSize = make_uint3(256);
config.volumeSize = make_uint3(512);
// these are physical dimensions in meters
config.volumeDimensions = make_float3(size);
config.nearPlane = 0.4f;
config.farPlane = 5.0f;
config.mu = 0.1;
config.combinedTrackAndReduce = false;
// change the following parameters for using 640 x 480 input images
config.inputSize = make_uint2(320,240);
// config.inputSize = make_uint2(640,480);
// Intrinsics halved to match the 320x240 input (fx, fy, cx, cy).
config.camera =  make_float4(531.15/2, 531.15/2, 640/4, 480/4);
// config.camera =  make_float4(614.221/2, 614.221/2, 640/4, 480/4);
// config.iterations is a vector<int>, the length determines
// the number of levels to be used in tracking
// push back more then 3 iteraton numbers to get more levels.
config.iterations[0] = 10;
config.iterations[1] = 5;
config.iterations[2] = 4;
config.dist_threshold = (argc > 2 ) ? atof(argv[2]) : config.dist_threshold;
config.normal_threshold = (argc > 3 ) ? atof(argv[3]) : config.normal_threshold;
// Start pose centered in x/y at the front of the volume.
initPose = SE3<float>(makeVector(size/2, size/2, 0, 0, 0, 0));
// rgbdDevice = RGBD::create(RGBD::kRGBDDeviceKinect);
// rgbdDevice = RGBD::create(RGBD::kRGBDRealSense);
rgbdDevice = RGBD::create(RGBD::kRGBDDeviceOpenNI2);
if (rgbdDevice == 0L) {
std::cerr << "no capture device" << std::endl;
return -1;
}
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE );
// Window: 2x-scaled track views on the left plus a 640-wide big view.
glutInitWindowSize(config.inputSize.x * 2 + 640, max(config.inputSize.y * 2, 480));
glutCreateWindow("kfusion");
kfusion.Init(config);
// input buffers (double-buffered depth so capture and upload can overlap)
depthImage[0].alloc(make_uint2(640, 480));
depthImage[1].alloc(make_uint2(640, 480));
rgbImage.alloc(make_uint2(640, 480));
// render buffers
lightScene.alloc(config.inputSize), trackModel.alloc(config.inputSize), lightModel.alloc(config.inputSize);
pos.alloc(make_uint2(640, 480)), normals.alloc(make_uint2(640, 480)), dep.alloc(make_uint2(640, 480)), texModel.alloc(make_uint2(640, 480));
if(printCUDAError()) {
cudaDeviceReset();
return 1;
}
std::cout << "Using depthImage size: " << depthImage[0].size.x*depthImage[0].size.y * sizeof(uint16_t) << " bytes " << std::endl;
std::cout << "Using rgbImage size: " << rgbImage.size.x*rgbImage.size.y * sizeof(uchar3) << " bytes " << std::endl;
// Zero the capture buffers before handing them to the device.
memset(depthImage[0].data(), 0, depthImage[0].size.x*depthImage[0].size.y * sizeof(uint16_t));
memset(depthImage[1].data(), 0, depthImage[1].size.x*depthImage[1].size.y * sizeof(uint16_t));
memset(rgbImage.data(), 0, rgbImage.size.x*rgbImage.size.y * sizeof(uchar3));
uint16_t * buffers[2] = {depthImage[0].data(), depthImage[1].data()};
rgbdDevice->setBuffers(buffers, (unsigned char *)rgbImage.data());
if (rgbdDevice->open()){
cudaDeviceReset();
//......... part of the code is omitted here .........
示例3: main
/**
 * Entry point for the synthetic KFusion test app: builds a procedural
 * ground-truth TSDF volume (boxes + sphere) which display() ray-casts to
 * feed the tracker, instead of using a real depth camera.
 * CLI: pass "-b" as the first argument to enable benchmark mode.
 */
int main(int argc, char ** argv) {
    benchmark = argc > 1 && string(argv[1]) == "-b";

    KFusionConfig config;
    config.volumeSize = make_uint3(128);
    config.combinedTrackAndReduce = false;
    config.iterations[0] = 10;
    config.iterations[1] = 5;
    config.iterations[2] = 5;
    config.inputSize = make_uint2(320, 240);
    config.camera = make_float4(100, 100, 160, 120);
    config.nearPlane = 0.001f;           // fix: float literal (was the double literal 0.001)
    config.maxweight = 100;
    config.mu = 0.1f;                    // fix: float literal (was the double literal 0.1)
    config.dist_threshold = 0.2f;
    config.normal_threshold = 0.8f;

    kfusion.Init(config);
    if (printCUDAError()) {
        cudaDeviceReset();
        exit(1);
    }

    // Build the synthetic reference scene: empty volume (+1), then three
    // boxes and a sphere written with negative values.
    reference.init(config.volumeSize, config.volumeDimensions);
    initVolumeWrap(reference, 1.0f);
    setBoxWrap(reference, make_float3(0.1f, 0.1f, 0.8f), make_float3(0.9f, 0.9f, 0.9f), -1.0f);
    setBoxWrap(reference, make_float3(0.1f, 0.8f, 0.1f), make_float3(0.9f, 0.9f, 0.9f), -1.0f);
    setBoxWrap(reference, make_float3(0.8f, 0.1f, 0.1f), make_float3(0.9f, 0.9f, 0.9f), -1.0f);
    setSphereWrap(reference, make_float3(0.5f), 0.2f, -1.0f);

    kfusion.setPose(toMatrix4(trans * rot * preTrans));

    // Per-frame buffers at tracking resolution.
    vertex.alloc(config.inputSize);
    normal.alloc(config.inputSize);
    depth.alloc(config.inputSize);
    rgb.alloc(config.inputSize);

    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE);
    glutInitWindowSize(config.inputSize.x * 3, config.inputSize.y * 3);
    glutCreateWindow("kfusion test");
    glutDisplayFunc(display);
    glutKeyboardFunc(keys);
    glutSpecialFunc(specials);
    glutReshapeFunc(reshape);
    glutIdleFunc(idle);
    glutMainLoop();

    // NOTE(review): classic glutMainLoop() never returns, so this cleanup is
    // unreachable there; kept for freeglut-style loops that can exit.
    cudaDeviceReset();
    return 0;
}
示例4: display
// Per-frame GLUT display callback (Kinect demo): grab a depth frame, run
// KFusion tracking + integration, render three debug views, and accumulate
// per-stage timing statistics.
void display(void){
const uint2 imageSize = kfusion.configuration.inputSize;
static bool integrate = true;   // last Track() verdict; persists across frames
glClear( GL_COLOR_BUFFER_BIT );
const double startFrame = Stats.start();
DepthFrameKinect();   // pull the next raw depth frame from the Kinect
const double startProcessing = Stats.sample("kinect");
kfusion.setKinectDeviceDepth(depthImage.getDeviceImage());
Stats.sample("raw to cooked");
integrate = kfusion.Track();
Stats.sample("track");
// Integrate when tracking succeeded, or unconditionally right after a
// reset so the volume receives its first data.
if(integrate || reset){
kfusion.Integrate();
Stats.sample("integrate");
reset = false;
}
renderLight( lightModel.getDeviceImage(), kfusion.vertex, kfusion.normal, light, ambient);
renderLight( lightScene.getDeviceImage(), kfusion.inputVertex[0], kfusion.inputNormal[0], light, ambient );
renderTrackResult( depth.getDeviceImage(), kfusion.reduction );
// Ensure all GPU render kernels have finished before glDrawPixels reads.
cudaDeviceSynchronize();
Stats.sample("render");
// NOTE(review): glClear was already called at the top of this function --
// this second call looks redundant; confirm before removing.
glClear(GL_COLOR_BUFFER_BIT);
glRasterPos2i(0,imageSize.y * 0);
glDrawPixels(lightScene);   // shaded raw input
glRasterPos2i(imageSize.x, imageSize.y * 0);
glDrawPixels(depth);        // track-result visualization
glRasterPos2i(0,imageSize.y * 1);
glDrawPixels(lightModel);   // shaded raycast model
const double endProcessing = Stats.sample("draw");
Stats.sample("total", endProcessing - startFrame, PerfStats::TIME);
Stats.sample("total_proc", endProcessing - startProcessing, PerfStats::TIME);
if(printCUDAError())
exit(1);
++counter;
// Dump and reset timing statistics every 50 frames.
if(counter % 50 == 0){
Stats.print();
Stats.reset();
cout << endl;
}
glutSwapBuffers();
}
示例5: keys
// GLUT keyboard callback:
//   'c' -- clear the reconstruction and restart tracking from the initial pose
//   'q' -- quit the application
void keys(unsigned char key, int x, int y){
    if (key == 'c') {
        kfusion.Reset();
        kfusion.setPose(toMatrix4(initPose));
        reset = true;
    } else if (key == 'q') {
        exit(0);
    }
}
示例6: exitFunc
// atexit handler: close and free the RGB-D capture device, release the
// KFusion state, then reset the CUDA device.
// NOTE(review): the ordering (device closed before cudaDeviceReset) looks
// deliberate -- preserve it.
void exitFunc(void){
// CloseKinect();
rgbdDevice->close();
delete rgbdDevice;
kfusion.Clear();
cudaDeviceReset();
}
示例7: keys
// GLUT keyboard callback for the live-capture demo:
//   'c' -- clear the volume and restart tracking from the initial pose
//   'q' -- quit the application
//   'i' -- toggle volume integration on/off
//   't' -- toggle textured vs. shaded rendering of the big view
void keys(unsigned char key, int x, int y){
    if (key == 'c') {
        kfusion.Reset();
        kfusion.setPose(toMatrix4(initPose));
        reset = true;
    } else if (key == 'q') {
        exit(0);
    } else if (key == 'i') {
        should_integrate = !should_integrate;
    } else if (key == 't') {
        render_texture = !render_texture;
    }
}
示例8: keys
// GLUT keyboard callback for the synthetic test app:
//   'r' -- re-seed the camera pose from the free-view transform
//   'c' -- clear the volume, then re-seed the pose
//   'd' -- print the current tracked pose
//   'q' -- quit the application
// Always requests a redisplay afterwards.
void keys(unsigned char key, int x, int y) {
    if (key == 'r') {
        kfusion.setPose( toMatrix4( trans * rot * preTrans ));
    } else if (key == 'c') {
        kfusion.Reset();
        kfusion.setPose( toMatrix4( trans * rot * preTrans ));
    } else if (key == 'd') {
        cout << kfusion.pose << endl;
    } else if (key == 'q') {
        exit(0);
    }
    glutPostRedisplay();
}
示例9: exitFunc
// atexit handler: shut down the Kinect capture, release the KFusion state,
// then reset the CUDA device (in that order).
void exitFunc(void){
CloseKinect();
kfusion.Clear();
cudaDeviceReset();
}
示例10: display
// Per-frame GLUT display callback (RGB-D device demo): upload the latest
// captured depth buffer, run tracking, conditionally integrate + raycast,
// and draw five debug views plus a large free-view rendering.
void display(void){
const uint2 imageSize = kfusion.configuration.inputSize;
static bool integrate = true;   // last Track() verdict; persists across frames
glClear( GL_COLOR_BUFFER_BIT );
const double startFrame = Stats.start();
const double startProcessing = Stats.sample("kinect");
// kfusion.setKinectDeviceDepth(depthImage[GetKinectFrame()].getDeviceImage());
// Upload whichever of the two capture buffers the device filled last.
kfusion.setKinectDeviceDepth(depthImage[rgbdDevice->currentDepthBufferIndex()].getDeviceImage());
Stats.sample("raw to cooked");
integrate = kfusion.Track();
Stats.sample("track");
// Integrate only when enabled, tracking succeeded and the frame counter
// hits the integration rate -- or unconditionally while resetting.
if((should_integrate && integrate && ((counter % integration_rate) == 0)) || reset){
kfusion.Integrate();
kfusion.Raycast();
Stats.sample("integrate");
if(counter > 2) // use the first two frames to initialize
reset = false;
}
renderLight( lightScene.getDeviceImage(), kfusion.inputVertex[0], kfusion.inputNormal[0], light, ambient );
renderLight( lightModel.getDeviceImage(), kfusion.vertex, kfusion.normal, light, ambient);
renderTrackResult(trackModel.getDeviceImage(), kfusion.reduction);
// Re-render the expensive full-resolution free view only every 4th frame,
// or when explicitly requested.
static int count = 4;
if(count > 3 || redraw_big_view){
renderInput( pos, normals, dep, kfusion.integration, toMatrix4( trans * rot * preTrans ) * getInverseCameraMatrix(kfusion.configuration.camera * 2), kfusion.configuration.nearPlane, kfusion.configuration.farPlane, kfusion.configuration.stepSize(), 0.75 * kfusion.configuration.mu);
count = 0;
redraw_big_view = false;
} else
count++;
// Big view: textured with the live RGB image, or plain shading.
if(render_texture)
renderTexture( texModel.getDeviceImage(), pos, normals, rgbImage.getDeviceImage(), getCameraMatrix(2*kfusion.configuration.camera) * inverse(kfusion.pose), light);
else
renderLight( texModel.getDeviceImage(), pos, normals, light, ambient);
// Ensure all GPU render kernels finished before glDrawPixels reads.
cudaDeviceSynchronize();
Stats.sample("render");
glClear(GL_COLOR_BUFFER_BIT);
glRasterPos2i(0, 0);
glDrawPixels(lightScene); // left top
glRasterPos2i(0, 240);
// RGB frame is 640x480; zoom 0.5 shrinks it into the 320x240 slot.
glPixelZoom(0.5, -0.5);
glDrawPixels(rgbImage); // left bottom
glPixelZoom(1,-1);
glRasterPos2i(320,0);
glDrawPixels(lightModel); // middle top
glRasterPos2i(320,240);
glDrawPixels(trackModel); // middle bottom
glRasterPos2i(640, 0);
glDrawPixels(texModel); // right
const double endProcessing = Stats.sample("draw");
Stats.sample("total", endProcessing - startFrame, PerfStats::TIME);
Stats.sample("total_proc", endProcessing - startProcessing, PerfStats::TIME);
if(printCUDAError())
exit(1);
++counter;
// Dump and reset timing statistics every 50 frames.
if(counter % 50 == 0){
Stats.print();
Stats.reset();
std::cout << std::endl;
}
glutSwapBuffers();
}
示例11: display
// Per-frame GLUT display callback (synthetic test app): ray-cast the
// ground-truth reference volume to synthesize a depth frame, feed it to the
// tracker, integrate/raycast, and draw a 3x3 grid of debug views
// (ground truth row, tracking row, free-view row).
void display(void) {
static bool integrate = true;   // last Track() verdict; persists across frames
const uint2 imageSize = kfusion.configuration.inputSize;
const double start = Stats.start();
// Row 1: synthesize vertex/normal/depth images from the reference volume
// at the current free-view pose.
renderInput(vertex.getDeviceImage(), normal.getDeviceImage(), depth.getDeviceImage(), reference, toMatrix4( trans * rot * preTrans ) * getInverseCameraMatrix(kfusion.configuration.camera), kfusion.configuration.nearPlane, kfusion.configuration.farPlane, kfusion.configuration.stepSize(), 0.01 );
cudaDeviceSynchronize();
Stats.sample("ground raycast");
Stats.sample("ground copy");
glRasterPos2i(0,0);
glDrawPixels(vertex);
glRasterPos2i(imageSize.x, 0);
glDrawPixels(normal);
glRasterPos2i(imageSize.x * 2, 0);
glDrawPixels(depth);
Stats.sample("ground draw");
// Feed the synthetic depth into the KFusion pipeline.
kfusion.setDepth( depth.getDeviceImage() );
cudaDeviceSynchronize();
const double track_start = Stats.sample("process depth");
// Skip tracking on the very first frame (nothing integrated yet).
if(counter > 1){
integrate = kfusion.Track();
cudaDeviceSynchronize();
Stats.sample("track");
}
renderTrackResult(rgb.getDeviceImage(), kfusion.reduction);
cudaDeviceSynchronize();
Stats.sample("track render");
Stats.sample("track copy");
if(integrate){
kfusion.Integrate();
cudaDeviceSynchronize();
Stats.sample("integration");
kfusion.Raycast();
cudaDeviceSynchronize();
Stats.sample("raycast");
// Reuse the vertex/normal buffers to show the model raycast in row 2.
vertex = kfusion.vertex;
normal = kfusion.normal;
Stats.sample("raycast get");
}
// Row 2: model raycast (vertex/normal) plus the track-result image.
glRasterPos2i(0,imageSize.y * 1);
glDrawPixels(vertex);
glRasterPos2i(imageSize.x, imageSize.y * 1);
glDrawPixels(normal);
glRasterPos2i(2 * imageSize.x, imageSize.y * 1);
glDrawPixels(rgb);
Stats.sample("track draw");
Stats.sample("total track", Stats.get_time() - track_start, PerfStats::TIME);
// Row 3: raycast the reconstructed volume from the tracked pose.
renderInput(vertex.getDeviceImage(), normal.getDeviceImage(), depth.getDeviceImage(), kfusion.integration, kfusion.pose * getInverseCameraMatrix(kfusion.configuration.camera), kfusion.configuration.nearPlane, kfusion.configuration.farPlane, kfusion.configuration.stepSize(), 0.7 * kfusion.configuration.mu );
cudaDeviceSynchronize();
Stats.sample("view raycast");
Stats.sample("view copy");
glRasterPos2i(0,imageSize.y * 2);
glDrawPixels(vertex);
glRasterPos2i(imageSize.x, imageSize.y * 2);
glDrawPixels(normal);
glRasterPos2i(imageSize.x * 2, imageSize.y * 2);
glDrawPixels(depth);
Stats.sample("view draw");
Stats.sample("events");
Stats.sample("total all", Stats.get_time() - start, PerfStats::TIME);
// Dump and reset timing statistics every 30 frames.
if(counter % 30 == 0){
Stats.print();
Stats.reset();
cout << endl;
}
++counter;
printCUDAError();
glutSwapBuffers();
}