This page collects typical usage examples of the C++ Calibration class. If you are wondering what the C++ Calibration class does, how to use it, or where to find usage examples, the curated class code examples below may help.
Fifteen code examples of the Calibration class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: pointercal
void MainWindow::OnJiaoZhun()
{
#ifdef ARM
    QFile pointercal("/etc/pointercal");
    if (pointercal.exists())
    {
        // Back up the existing tslib calibration file, then remove it
        // so that the calibration dialog runs again.
        pointercal.copy("/etc/pointercal.bak");
        pointercal.remove();
    }
    if (!pointercal.exists() || pointercal.size() == 0)
    {
        for (;;)
        {
            Calibration cal;
            cal.exec();
            // "提示" = "Notice". The message asks the user to confirm that
            // touch-screen calibration is complete; if not confirmed, the
            // calibration program restarts after 10 seconds.
            QMessageBox message(QMessageBox::Question,
                                QString::fromUtf8("提示"),
                                QString::fromUtf8("<p>请确认触摸屏已经校正完毕。</p>"
                                                  "<p>如果你不确认此提示消息,将在10秒钟后重新进入触摸屏设置程序。</p>"),
                                QMessageBox::Yes | QMessageBox::No);
            QTimer::singleShot(10 * 1000, &message, SLOT(reject()));
            int reply = message.exec();
            if (reply == QMessageBox::Yes)
            {
                ::sync();  // flush the new /etc/pointercal to disk
                break;
            }
        }
    }
#endif
}
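OnJiaoZhun() ("校准", i.e. calibration) is an ordinary Qt slot, so it would typically be wired to a button or menu action. A minimal sketch in the same Qt4-era style as the snippet; setupCalibrationButton and calibrateButton are hypothetical names, and it assumes OnJiaoZhun() is declared under slots in MainWindow:
#include <QPushButton>

void MainWindow::setupCalibrationButton()
{
    // Hypothetical wiring: clicking the button starts touch-screen calibration.
    QPushButton *calibrateButton = new QPushButton(tr("Calibrate"), this);
    connect(calibrateButton, SIGNAL(clicked()), this, SLOT(OnJiaoZhun()));
}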
Example 2: projectPoint
RealPoint3D<float> projectPoint(const Calibration& calibration, bool reduce2, int camera, const View& point) {
    if(point.inCamera(camera)) {
        RealPoint2D normalPoint;
        // When reduce2 is set, coordinates are doubled (the features were
        // presumably detected on a half-resolution image).
        if(reduce2) normalPoint = RealPoint2D(2.0f*point[camera].u(), 2.0f*point[camera].v());
        else normalPoint = RealPoint2D(point[camera].u(), point[camera].v());
        RealPoint2D rectifiedPoint = calibration.rectify(camera, normalPoint);
        RealPoint3D<float> ray = calibration.ray(camera, rectifiedPoint);
        return RealPoint3D<float>(ray.x(), ray.y(), ray.z());
    } else {
        // The point is not visible in this camera: return a default-constructed point.
        return RealPoint3D<float>();
    }
}
Example 3: operator()
void EpiMatcher::operator()(const Calibration& calib1, unsigned int camera1, const Calibration& calib2, unsigned int camera2,
                            const vector<ExtendedPoint2D>& pi1, const vector<ExtendedPoint2D>& pi2,
                            vector<pair<size_t, size_t> >& matches) {
    matches.clear();
    // Epipole: camera2's centre projected into camera1's image plane.
    const HomogenousPoint3D ch((float)calib2.translation(camera2).x(), (float)calib2.translation(camera2).y(), (float)calib2.translation(camera2).z(), 1.0f);
    HomogenousPoint2D e;
    mulMat43Vect4(calib1.projectionMatrix(camera1), ch, e);
    // Cross-check vectors: best (score, partner index) seen so far per point.
    vector<pair<float, int> > prevPIMatch_(pi1.size(), make_pair(0.0f, -1));
    vector<pair<float, int> > curPIMatch_(pi2.size(), make_pair(0.0f, -1));
#pragma omp parallel for
    for(size_t p = 0; p < pi2.size(); ++p) {
        const ExtendedPoint2D& point2 = pi2[p];
        // Preselect candidates using the epipolar constraint.
        vector<size_t> candidates;
        selectingPoints(e, calib2, camera2, calib1, camera1, point2, pi1, candidates);
        // Select the best candidate using ZNCC correlation.
        size_t bestPoint = 0;
        float bestScore = 0.0f;
        for(vector<size_t>::const_iterator it = candidates.begin(); it != candidates.end(); it++) {
            const float score = point2.descriptor().corelation(pi1[*it].descriptor());
            if(score > bestScore) {
                bestPoint = *it;
                bestScore = score;
            }
        }
        if(bestScore >= scoreMin_) {
#pragma omp critical
            {
                curPIMatch_[p] = make_pair(bestScore, (int)bestPoint);
                if(prevPIMatch_[bestPoint].first <= bestScore) prevPIMatch_[bestPoint] = make_pair(bestScore, (int)p);
            }
        }
    }
    // Keep only mutual best matches (left-right consistency check).
    for(size_t i = 0; i < curPIMatch_.size(); ++i) {
        if(curPIMatch_[i].second != -1) {
            if(prevPIMatch_[curPIMatch_[i].second].second == (int)i) {
                matches.push_back(make_pair((size_t)curPIMatch_[i].second, i));
            }
        }
    }
}
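The final loop is a standard left-right consistency check: a pair is kept only when each point is the other's best-scoring partner. The same idea in isolation, as a self-contained sketch (crossCheck and the index vectors are hypothetical, not part of EpiMatcher):
#include <utility>
#include <vector>

// Keep (i, j) only when j's best match is i and i's best match is j.
// bestFor1[i] / bestFor2[j] hold the index of the best-scoring partner, or -1.
std::vector<std::pair<size_t, size_t> >
crossCheck(const std::vector<int>& bestFor1, const std::vector<int>& bestFor2) {
    std::vector<std::pair<size_t, size_t> > matches;
    for (size_t j = 0; j < bestFor2.size(); ++j) {
        const int i = bestFor2[j];
        if (i != -1 && bestFor1[i] == (int)j)
            matches.push_back(std::make_pair((size_t)i, j));
    }
    return matches;
}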
Example 4: ofBackground
void testApp::draw() {
    ofBackground(128);
    cam.begin();
    glEnable(GL_DEPTH_TEST);
    glPushMatrix();
    // Flip from OpenCV's camera coordinates into the OpenGL view.
    glScaled(1, -1, -1);
    ofDrawAxis(100);
    ofSetColor(255);
    curColor.draw(0, 0, curColor.getWidth(), curColor.getHeight());
    // Draw the colored 3-D point cloud.
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_COLOR_ARRAY);
    glColorPointer(3, GL_FLOAT, sizeof(Point3f), &(pointCloudColors[0].x));
    glVertexPointer(3, GL_FLOAT, sizeof(Point3f), &(pointCloud[0].x));
    glDrawArrays(GL_POINTS, 0, pointCloud.size());
    glDisableClientState(GL_COLOR_ARRAY);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisable(GL_DEPTH_TEST);
    ofSetColor(255);
    // Draw the 2-D image points on top, without depth testing.
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(2, GL_FLOAT, sizeof(Point2f), &(imagePoints[0].x));
    glDrawArrays(GL_POINTS, 0, pointCloud.size());
    glDisableClientState(GL_VERTEX_ARRAY);
    // Left half of the window shows the kinect calibration, right half the
    // color calibration transformed into the kinect frame.
    Calibration* curCalibration;
    if(mouseX < ofGetWidth() / 2) {
        curCalibration = &kinectCalibration;
    } else {
        curCalibration = &colorCalibration;
        applyMatrix(makeMatrix(rotationColorToKinect, translationColorToKinect));
    }
    curCalibration->draw3d(curImage);
    glPopMatrix();
    cam.end();
}
Example 5: transformationSet
void ProbeCalibrationWidget::calibrate()
{
    // Generate the transformation (rotation and translation) matrices for each image.
    std::cout << std::endl;
    std::cout << "Calculating Image Data" << std::endl;
    std::cout << std::endl;
    // Note: transformationSet is declared here but not used in this snippet.
    std::vector<vnl_matrix<double>* > transformationSet(imageStack.size());
    Calibration * calibrator = Calibration::New();
    calibrator->ClearTransformations();
    calibrator->ClearImagePoints();
    for (uint i = 0; i < imageStack.size(); i++) {
        std::cout << "Image " << i + 1 << " data" << std::endl;
        // rotations are stored w-first; vnl_quaternion expects (x, y, z, w).
        vnl_quaternion<double> quaternion(rotations[i][1], rotations[i][2],
                                          rotations[i][3], rotations[i][0]);
        vnl_matrix<double> transformation = quaternion.rotation_matrix_transpose();
        transformation = transformation.transpose();
        calibrator->InsertTransformations(transformation, translations.get_row(i));
        calibrator->InsertImagePoints(coords[i]);
    }
    calibrator->Calibrate();
    calibrationParameters = calibrator->getEstimatedUSCalibrationParameters();
}
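For reference, rotation_matrix_transpose() followed by transpose() amounts to the standard quaternion-to-rotation-matrix conversion. A plain-C++ sketch of that formula, independent of VNL (quatToRotation is a hypothetical helper, not part of the widget):
#include <array>

// Row-major 3x3 rotation matrix from a unit quaternion (w, x, y, z).
std::array<float, 9> quatToRotation(float w, float x, float y, float z) {
    std::array<float, 9> R = {
        1 - 2*(y*y + z*z), 2*(x*y - w*z),     2*(x*z + w*y),
        2*(x*y + w*z),     1 - 2*(x*x + z*z), 2*(y*z - w*x),
        2*(x*z - w*y),     2*(y*z + w*x),     1 - 2*(x*x + y*y)
    };
    return R;
}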
Example 6: toVector
MARG::MARG(const MARGConfiguration& configuration,
           const Readout::MARG& readout,
           const Temperature& temperature_difference,
           const Calibration& calibration)
{
#ifdef USE_TEMPERATURE_COMPENSATION
    // First-order temperature model: additive offset drift (in raw counts)
    // and multiplicative sensitivity drift, both proportional to the
    // temperature difference from the calibration temperature.
    const int16_t temperature_offset[2] = {
        (int16_t)((temperature_difference.degreesCelsius() * LSM9DS0_OFFSET_PER_CELSIUS_G) / configuration.gScaleMdps()),
        (int16_t)((temperature_difference.degreesCelsius() * LSM9DS0_OFFSET_PER_CELSIUS_A) / configuration.aScaleMg())
    };
    const float temperature_scale[2] = {
        (float)temperature_difference.degreesCelsius() * sensitivity_per_celsius_g,
        (float)temperature_difference.degreesCelsius() * sensitivity_per_celsius_a
    };
#else
    // Neutral values when compensation is disabled: zero offset shift and a
    // zero sensitivity term, since the factor applied below is (1 + temperature_scale).
    const int16_t temperature_offset[2] = { 0, 0 };
    const float temperature_scale[2] = { 0, 0 };
#endif
    const Vector<int16_t> raw_offset_adjusted[2] = {
        readout.g - calibration.g_offset - temperature_offset[0],
        readout.a - calibration.a_offset - temperature_offset[1]
    };
    const Vector<float> scale_adjusted[2] = {
        calibration.g_scale * (1 + temperature_scale[0]),
        calibration.a_scale * (1 + temperature_scale[1])
    };
    g_ = toVector(raw_offset_adjusted[0] * scale_adjusted[0], configuration.gScaleMdps() / 1000 * M_PI / 180);
    a_ = toVector(raw_offset_adjusted[1] * scale_adjusted[1], configuration.aScaleMg() / 1000);
    m_ = toVector(readout.m * calibration.m_rotation - calibration.m_offset, configuration.mScaleMgauss() / 1000);
    // Axis orientation fix-up: with inverted placement the accelerometer X/Y
    // and gyro X/Y axes are mirrored; the gyro Z axis is always flipped.
    if (calibration.hasInvertedPlacement()) {
        a_.setX(-a_.x());
        a_.setY(-a_.y());
        g_.setX(-g_.x());
        g_.setY(-g_.y());
    }
    g_.setZ(-g_.z());
}
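The compensated branch applies a first-order model: the raw offset shifts by ΔT·k_offset counts and the sensitivity scales by (1 + ΔT·k_sens). A self-contained sketch of that model for a single axis (compensate and the constant names are hypothetical, not part of the original code):
#include <cstdint>

// First-order temperature compensation for one sensor axis.
// kOffsetPerC and kSensPerC are per-device constants (hypothetical names).
float compensate(int16_t raw, int16_t offset, float scale,
                 float dT, float kOffsetPerC, float kSensPerC)
{
    const int16_t tempOffset = (int16_t)(dT * kOffsetPerC); // additive drift in counts
    const float tempScale = 1.0f + dT * kSensPerC;          // multiplicative sensitivity drift
    return (raw - offset - tempOffset) * scale * tempScale;
}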
Example 7: saveTransformation
void saveTransformation(Calibration& from, Calibration& to, string filename) {
    Mat rotation, translation;
    from.getTransformation(to, rotation, translation);
    FileStorage fs(ofToDataPath(filename), FileStorage::WRITE);
    fs << "rotation" << rotation;
    fs << "translation" << translation;
    cout << "rotation:" << endl << rotation << endl;
    cout << "translation:" << endl << translation << endl;
}
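saveTransformation() writes two named Mat nodes, so reading them back is a standard OpenCV FileStorage read. A hedged sketch of a counterpart loadTransformation(), which is not part of the source:
// Hypothetical counterpart to saveTransformation(): read the matrices back.
bool loadTransformation(string filename, Mat& rotation, Mat& translation) {
    FileStorage fs(ofToDataPath(filename), FileStorage::READ);
    if(!fs.isOpened()) return false;
    fs["rotation"] >> rotation;
    fs["translation"] >> translation;
    return !rotation.empty() && !translation.empty();
}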
Example 8: createTriplets
void createTriplets(const Bundle2& bundle, const Calibration& calibration, unsigned int curFrame, const vector<ShudaTriplet3D>& prevTriplets, vector<ShudaTriplet3D>& triplets) {
    triplets.clear();
    // std::vector rather than a variable-length array, which is not standard C++.
    vector<bool> usedTriplet(bundle.tracksCount(), false);
    for(size_t i = 0; i < bundle.frame(curFrame).size(); ++i) {
        for(int camera = 0; camera < calibration.num_cameras(); ++camera) {
            Track& t = *(bundle.frame(curFrame)[i].track());
            // The track must extend at least two frames beyond the current point.
            if(bundle.frame(curFrame)[i].inTrackNumber() + 2 < t.size()) {
                ShudaTriplet3D triplet;
                triplet.frame = curFrame;
                triplet.camera = camera;
                bool ok;
                triplet.p1_2d = extractPoint(camera, bundle.frame(curFrame)[i], ok);
                if(!ok) continue;
                triplet.p1 = projectPoint(calibration, bundle.parameters().reduce2, camera, bundle.frame(curFrame)[i]);
                triplet.p2_2d = extractPoint(camera, t[bundle.frame(curFrame)[i].inTrackNumber() + 1], ok);
                if(!ok) continue;
                triplet.p2 = projectPoint(calibration, bundle.parameters().reduce2, camera, t[bundle.frame(curFrame)[i].inTrackNumber() + 1]);
                triplet.p3_2d = extractPoint(camera, t[bundle.frame(curFrame)[i].inTrackNumber() + 2], ok);
                if(!ok) continue;
                triplet.p3 = projectPoint(calibration, bundle.parameters().reduce2, camera, t[bundle.frame(curFrame)[i].inTrackNumber() + 2]);
                // Link to the previous frame's triplet whose p2 matches this p1,
                // unless that triplet is already claimed.
                bool found = false;
                size_t j = 0;
                while(!found && (j < prevTriplets.size())) {
                    if(prevTriplets[j].p2 == triplet.p1) found = true;
                    else ++j;
                }
                if(found && !usedTriplet[j]) {
                    triplet.prevTriplet = j;
                    usedTriplet[j] = true;
                } else {
                    triplet.prevTriplet = -1;
                }
                triplets.push_back(triplet);
            }
        }
    }
}
Example 9: selectingPoints
void EpiMatcher::selectingPoints(const HomogenousPoint2D& e, const Calibration& calib1, unsigned int cam1,
                                 const Calibration& calib2, unsigned int cam2, const ExtendedPoint2D& point,
                                 const vector<ExtendedPoint2D>& pi, vector<size_t>& candidates) const {
    // First point on the epipolar line: the epipole itself.
    const RealPoint2D epiline_p1 = e;
    // Second point: the query point back-projected to 3-D and reprojected
    // into the other camera.
    HomogenousPoint2D rect_p2 = calib1.rectify(cam1, point);
    HomogenousPoint3D tmp;
    mulMat34Vect3(calib1.invProjectionMatrix(cam1), rect_p2, tmp);
    HomogenousPoint2D tmp_p2;
    mulMat43Vect4(calib2.projectionMatrix(cam2), tmp, tmp_p2);
    const RealPoint2D epiline_p2 = tmp_p2;
    const float epi_x = epiline_p2.u() - epiline_p1.u();
    const float epi_y = epiline_p2.v() - epiline_p1.v();
    const float denum = std::sqrt(epi_x*epi_x + epi_y*epi_y);
    candidates.clear();
    // Keep points close enough to the epipolar line and within the search radius.
    for(size_t i = 0; i < pi.size(); ++i) {
        const RealPoint2D p1 = calib2.rectify(cam2, pi[i]);
        const float dist = std::abs(epi_x*(epiline_p1.v() - p1.v()) - (epiline_p1.u() - p1.u())*epi_y)/denum;
        if((double)dist <= maxDist_ && pi[i].distance(point) <= zoneRadius_) candidates.push_back(i);
    }
}
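The rejection test in the loop is the classic distance from a point to the line through two points, |(p2−p1)×(p1−q)| / ‖p2−p1‖. The same computation as a self-contained check, using plain structs instead of the project's point types (pointToLineDistance and P2 are hypothetical):
#include <cmath>

struct P2 { float u, v; };

// Distance from point q to the infinite line through a and b
// (2-D cross-product magnitude divided by the segment length).
float pointToLineDistance(P2 a, P2 b, P2 q) {
    const float dx = b.u - a.u, dy = b.v - a.v;
    return std::abs(dx * (a.v - q.v) - (a.u - q.u) * dy) / std::sqrt(dx * dx + dy * dy);
}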
Example 10: ofLog
void Calibration::getTransformation(Calibration& dst, Mat& rotation, Mat& translation) {
    if(imagePoints.size() == 0 || dst.imagePoints.size() == 0) {
        ofLog(OF_LOG_ERROR, "getTransformation() requires both Calibration objects to have just been calibrated");
        return;
    }
    if(imagePoints.size() != dst.imagePoints.size() || patternSize != dst.patternSize) {
        ofLog(OF_LOG_ERROR, "getTransformation() requires both Calibration objects to be trained simultaneously on the same board");
        return;
    }
    Mat fundamentalMatrix, essentialMatrix;
    Mat cameraMatrix = distortedIntrinsics.getCameraMatrix();
    Mat dstCameraMatrix = dst.getDistortedIntrinsics().getCameraMatrix();
    // stereoCalibrate uses CALIB_FIX_INTRINSIC by default, so only the
    // extrinsic rotation/translation between the two cameras is estimated.
    stereoCalibrate(objectPoints,
                    imagePoints, dst.imagePoints,
                    cameraMatrix, distCoeffs,
                    dstCameraMatrix, dst.distCoeffs,
                    distortedIntrinsics.getImageSize(), rotation, translation,
                    essentialMatrix, fundamentalMatrix);
}
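With two calibrated objects in scope, calling this method is a one-liner. A hedged usage sketch reusing the kinectCalibration/colorCalibration pair from Example 4 (the output Mat names are hypothetical):
// Estimate the extrinsics from the kinect camera to the color camera.
Mat rotationKinectToColor, translationKinectToColor;
kinectCalibration.getTransformation(colorCalibration,
                                    rotationKinectToColor,
                                    translationKinectToColor);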
Example 11: createFirstTriplets
void createFirstTriplets(const Bundle2& bundle, const Calibration& calibration, vector<ShudaTriplet3D>& triplets) {
    for(size_t i = 0; i < bundle.frame(0).size(); ++i) {
        for(int camera = 0; camera < calibration.num_cameras(); ++camera) {
            ShudaTriplet3D triplet;
            triplet.prevTriplet = -1;
            triplet.frame = 0;
            triplet.camera = camera;
            bool ok;
            Track& t = *(bundle.frame(0)[i].track());
            // Tracks shorter than three frames cannot form a triplet
            // (t[1] and t[2] are accessed below).
            if(t.size() < 3) continue;
            triplet.p1_2d = extractPoint(camera, bundle.frame(0)[i], ok);
            if(!ok) continue;
            triplet.p1 = projectPoint(calibration, bundle.parameters().reduce2, camera, bundle.frame(0)[i]);
            triplet.p2_2d = extractPoint(camera, t[1], ok);
            if(!ok) continue;
            triplet.p2 = projectPoint(calibration, bundle.parameters().reduce2, camera, t[1]);
            triplet.p3_2d = extractPoint(camera, t[2], ok);
            if(!ok) continue;
            triplet.p3 = projectPoint(calibration, bundle.parameters().reduce2, camera, t[2]);
            triplets.push_back(triplet);
        }
    }
}
Example 12: renderCalibration
/// Render calibration
void Tuning::renderCalibration(Calibration& _calib) const {
    _calib.render(*this);
}
Example 13: main
int main() {
    int x, y, z;
    int leftX = 0;
    int leftY = 0;
    int rightX = 0;
    int rightY = 0;
    Mat cam0P, cam1P;
    Mat *Q = new Mat();
    Mat *T = new Mat();
    Calibration calibration;
    Triangulate triangulation;
    if (true) {
        // Stereo calibration from an XML file describing a 5x4 board;
        // returns the projection matrices of both cameras.
        vector<Mat> PandP;
        string filename = "stereo.xml";
        PandP = calibration.Calibrate(filename, 5, 4, T, Q);
        cam0P = PandP.at(0);
        cam1P = PandP.at(1);
    }
    VideoCapture cap("/home/artyom/Dropbox/BigData/workspace/TriangulateHuman/Debug/out.avi");
    /*
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
    */
    if (!cap.isOpened())
        return -1;
    Mat img;
    VideoCapture cap1("/home/artyom/Dropbox/BigData/workspace/TriangulateHuman/Debug/out1.avi");
    //cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    //cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
    if (!cap1.isOpened())
        return -1;
    Mat img1;
    string Pos = "";
    // Pedestrian detection with OpenCV's default HOG people detector.
    HOGDescriptor hog;
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
    //string posPoint = "";
    string posRect = "";
    string posRect2 = "";
    bool onceOnly = true;
    while (true)
    {
        cap >> img;
        cap1 >> img1;
        /*
        Mat tempFrame = img;
        Mat tempFrame1 = img1;
        cv::transpose(img, tempFrame);
        cv::flip(tempFrame, tempFrame, 0);
        cv::transpose(tempFrame, img);
        cv::flip(img, img, 0);
        cv::transpose(img1, tempFrame1);
        cv::flip(tempFrame1, tempFrame1, 0);
        cv::transpose(tempFrame1, img1);
        cv::flip(img1, img1, 0);
        */
        if (!img.data)
            continue;
        vector<Rect> found, found_filtered;
        vector<Rect> found1, found_filtered1;
        hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
        hog.detectMultiScale(img1, found1, 0, Size(8,8), Size(32,32), 1.05, 2);
        //hog.detect(img, found1, 0, Size(8,8), Size(0,0));
        // Filter out detections fully contained inside another detection.
        size_t i, j;
        for (i = 0; i < found.size(); i++)
        {
            Rect r = found[i];
            for (j = 0; j < found.size(); j++)
                if (j != i && (r & found[j]) == r)
                    break;
            if (j == found.size())
                found_filtered.push_back(r);
        }
        for (i = 0; i < found_filtered.size(); i++)
        {
//......... some code omitted here .........
Example 14: renderCalibration
/// Render calibration
void Tuning::renderCalibration(Calibration& _calib) const {
    _calib.render(*this, session_.exportSettings().outputMode());
}
Example 15:
Filter::Filter(const Calibration &calibration)
    : calibration_(calibration),
      accel_buffer_(ImuDevice::ACCELEROMETER, calibration.getBufferSize()),
      gyro_buffer_(ImuDevice::GYROSCOPE, calibration.getBufferSize()) {
}