This article collects typical usage examples of the C++ method KalmanFilter::predict. If you are unsure how KalmanFilter::predict is used in C++, or are looking for concrete calls to learn from, the curated examples below should help. You can also explore further usage examples of the KalmanFilter class to which the method belongs.
A total of 13 code examples of the KalmanFilter::predict method are shown below, ordered by popularity by default.
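Most of the examples below follow OpenCV's standard predict/correct cycle. As a point of reference, here is a minimal, self-contained sketch of that cycle for a 2-D point tracked with a constant-velocity model; the state layout (x, y, vx, vy) and the noise settings are illustrative assumptions rather than values taken from any example on this page.

#include <opencv2/video/tracking.hpp>
#include <opencv2/core.hpp>
#include <iostream>

using namespace cv;

int main()
{
    // 4 state variables (x, y, vx, vy), 2 measured variables (x, y), no control input.
    KalmanFilter KF(4, 2, 0);

    // Constant-velocity model: x += vx and y += vy at every step.
    KF.transitionMatrix = (Mat_<float>(4, 4) <<
        1, 0, 1, 0,
        0, 1, 0, 1,
        0, 0, 1, 0,
        0, 0, 0, 1);

    setIdentity(KF.measurementMatrix);                       // x and y are observed directly
    setIdentity(KF.processNoiseCov, Scalar::all(1e-4));      // illustrative tuning values
    setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1));
    setIdentity(KF.errorCovPost, Scalar::all(0.1));

    Mat_<float> measurement(2, 1);
    for (int i = 0; i < 10; ++i)
    {
        // predict() propagates the state (statePre = transitionMatrix * statePost)
        // and returns the predicted state vector.
        Mat prediction = KF.predict();
        Point predictPt(prediction.at<float>(0), prediction.at<float>(1));

        // Fake a measurement; in the examples below this comes from a detector,
        // a mouse position, a TF transform, an IMU, and so on.
        measurement(0) = 10.0f * i;
        measurement(1) = 5.0f * i;

        // correct() fuses the measurement into statePost / errorCovPost.
        Mat estimated = KF.correct(measurement);
        Point statePt(estimated.at<float>(0), estimated.at<float>(1));

        std::cout << predictPt << " -> " << statePt << std::endl;
    }
    return 0;
}

Note that predict() is called before correct() in each cycle: correct() refines the statePre that predict() has just computed.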
Example 1: KalmanFilter
TEST (KalmanFilterTest, Works) {
KalmanFilter *kf = new KalmanFilter(true);
ufvector4 initX = boost::numeric::ublas::zero_vector<float> (4);
initX(0) = 50.f;
initX(1) = 0.f;
initX(2) = 0.f;
initX(3) = 0.f;
ufmatrix4 initCov = boost::numeric::ublas::identity_matrix<float> (4);
initCov(0,0) = 5.f;
initCov(1,1) = 5.f;
initCov(2,2) = 0.f;
initCov(3,3) = 0.f;
kf->initialize(initX, initCov);
kf->predict(kf->genOdometry(-5.f, 0.f, 0.f),10.f);
kf->updateWithObservation(kf->genVisBall(58.f, 0.f));
kf->predict(kf->genOdometry(0.f, 0.f, 0.f),.1f);
kf->updateWithObservation(kf->genVisBall(55.f, .05f));
kf->predict(kf->genOdometry(0.f, 0.f, 0.f),.1f);
kf->updateWithObservation(kf->genVisBall(55.f, .05f));
kf->predict(kf->genOdometry(0.f, 0.f, 0.f),.1f);
kf->updateWithObservation(kf->genVisBall(55.f, .05f));
kf->predict(kf->genOdometry(0.f, 0.f, 0.f),.1f);
kf->updateWithObservation(kf->genVisBall(83.f, .05f));
}
Example 2: main
int main(int /*argc*/, char * /*argv*/[]) {
std::srand(std::time(NULL));
/// We want a simple 2d state
typedef SimpleState<2> state_t;
/// Our process model is the simplest possible: doesn't change the mean
typedef ConstantProcess<2, state_t> process_t;
/// Create a kalman filter instance with our chosen state and process types
KalmanFilter<state_t, process_t> kf;
/// Set our process model's variance
kf.processModel.sigma = state_t::VecState::Constant(6.5);
double dt = 0.5;
double sumSquaredError = 0;
const double measurementVariance = 9; //NOISE_AMPLITUDE / 2.0;
/// for sine curve
std::vector<StatePair> data = generateSineData(dt);
/// CSV header row
std::cout << "actual x,actual y,measurement x,measurement y,filtered x,filtered y,squared error" << std::endl;
double t = 0;
for (unsigned int i = 0; i < data.size(); ++i) {
/// Predict step: Update Kalman filter by predicting ahead by dt
kf.predict(dt);
/// "take a measurement" - in this case, noisify the actual measurement
Eigen::Vector2d pos = data[i].first;
Eigen::Vector2d m = data[i].second;
AbsoluteMeasurement<state_t> meas;
meas.measurement = m;
meas.covariance = Eigen::Vector2d::Constant(measurementVariance).asDiagonal();
/// Correct step: incorporate information from measurement into KF's state
kf.correct(meas);
/// Output info for csv
double squaredError = (pos[0] - kf.state.x[0]) * (pos[0] - kf.state.x[0]);
sumSquaredError += squaredError;
std::cout << pos[0] << "," << pos[1] << ",";
std::cout << meas.measurement[0] << "," << meas.measurement[1] << ",";
std::cout << kf.state.x[0] << "," << kf.state.x[1] << ",";
std::cout << squaredError;
std::cout << std::endl;
t += dt;
}
std::cerr << "Sum squared error: " << sumSquaredError << std::endl;
return 0;
}
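The helper generateSineData is referenced above but not shown. A plausible sketch follows, assuming StatePair is a std::pair of (true position, noisy measurement) as the loop body implies, and that the noise amplitude is on the order suggested by the measurementVariance comment; this is a guess at the helper, not its actual implementation.

#include <Eigen/Core>
#include <cmath>
#include <cstdlib>
#include <utility>
#include <vector>

// Assumed alias: first = true position on the curve, second = noisy measurement.
typedef std::pair<Eigen::Vector2d, Eigen::Vector2d> StatePair;

// Hypothetical helper: sample a sine curve at steps of dt and attach a
// uniformly-noised copy of each point as the simulated measurement.
std::vector<StatePair> generateSineData(double dt,
                                        double duration = 20.0,
                                        double noiseAmplitude = 6.0)
{
    std::vector<StatePair> data;
    for (double t = 0.0; t < duration; t += dt) {
        Eigen::Vector2d actual(t, std::sin(t));
        Eigen::Vector2d noise(noiseAmplitude * (std::rand() / double(RAND_MAX) - 0.5),
                              noiseAmplitude * (std::rand() / double(RAND_MAX) - 0.5));
        data.push_back(std::make_pair(actual, actual + noise));
    }
    return data;
}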
Example 3: Kalman_Perdict
int Kalman_Perdict(KalmanFilter &F)
{
if(notstarted[0] and notstarted[1] and notstarted[2])
{
Mat prediction = F.predict();
Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
MyFilledCircle(skin2,predictPt,1);
}
return 0;
}
Example 4: main
int main(int argc, char **argv)
{
ros::init(argc, argv, "pose_filter");
ros::NodeHandle nh;
tf::TransformListener lr;
tf::StampedTransform laser_trans, kinect_trans;
geometry_msgs::PoseStamped target_pose;
ros::Publisher pub = nh.advertise<geometry_msgs::PoseStamped>("leader_pose", 1);
target_pose.pose.position.z = 0; target_pose.pose.orientation = tf::createQuaternionMsgFromYaw(0);
target_pose.header.frame_id = "map";
Mat measurement = Mat(2, 1, CV_32FC1);
KalmanFilter kalman = KalmanFilter(4,2,0);
setup_kalman(kalman);
ros::Rate kalman_rate(KALMAN_HZ);
while(ros::ok())
{
try
{
lr.lookupTransform("map", "laser_target", ros::Time(0), laser_trans);
}
catch(tf::TransformException ex) {}
/*try
{
lr.lookupTransform("odom", "kinect_target", ros::Time(0), kinect_trans);
}
catch(tf::TransformException ex) {}
measurement.at<float>(0,0) = (laser_trans.getOrigin().x() + kinect_trans.getOrigin().x())/2;
measurement.at<float>(1,0) = (laser_trans.getOrigin().y() + kinect_trans.getOrigin().y())/2;*/
measurement.at<float>(0,0) = laser_trans.getOrigin().x();
measurement.at<float>(1,0) = laser_trans.getOrigin().y();
Mat predict = kalman.predict(); kalman.correct(measurement);
target_pose.pose.position.x = kalman.statePost.at<float>(0,0);
target_pose.pose.position.y = kalman.statePost.at<float>(1,0);
target_pose.header.stamp = laser_trans.stamp_;
pub.publish(target_pose);
kalman_rate.sleep();
}
return 0;
}
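setup_kalman is called above but its body is not shown. A hypothetical configuration consistent with the KalmanFilter(4,2,0) constructor (state x, y, vx, vy; measurement x, y) might look like the following; the dt derived from KALMAN_HZ and the noise values are guesses, not the node's real tuning.

// Hypothetical setup for the 4-state / 2-measurement constant-velocity tracker above.
void setup_kalman(cv::KalmanFilter &kalman)
{
    const float dt = 1.0f / KALMAN_HZ;          // KALMAN_HZ is the loop rate used above
    kalman.transitionMatrix = (cv::Mat_<float>(4, 4) <<
        1, 0, dt, 0,
        0, 1, 0, dt,
        0, 0, 1,  0,
        0, 0, 0,  1);
    cv::setIdentity(kalman.measurementMatrix);
    cv::setIdentity(kalman.processNoiseCov, cv::Scalar::all(1e-3));
    cv::setIdentity(kalman.measurementNoiseCov, cv::Scalar::all(1e-2));
    cv::setIdentity(kalman.errorCovPost, cv::Scalar::all(1.0));
}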
Example 5: getKalmanCentroid
cv::Point getKalmanCentroid(int x, int y)
{
cv::Point kalman_centroid;
mouse_info.x = x;
mouse_info.y = y;
Mat prediction = KF.predict();
Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
measurement(0) = mouse_info.x;
measurement(1) = mouse_info.y;
Point measPt(measurement(0),measurement(1));
mousev.push_back(measPt);
// generate measurement
//measurement += KF.measurementMatrix*state;
Mat estimated = KF.correct(measurement);
Point statePt(estimated.at<float>(0),estimated.at<float>(1));
kalmanv.push_back(statePt);
//drawCross( statePt, Scalar(0,255,0), 5 ); // Kalman
//drawCross( measPt, Scalar(0,0,255), 5 ); // Original
// for (int i = 0; i < mousev.size()-1; i++) {
// line(kalman_img, mousev[i], mousev[i+1], Scalar(255,255,0), 1);
// }
// for (int i = 0; i < kalmanv.size()-1; i++) {
// line(kalman_img, kalmanv[i], kalmanv[i+1], Scalar(0,255,0), 1);
// }
// if (contador > 30)
// {
// initializeKalman();
// }
// else
// {
// //ROS_INFO(">>> MEDIDA(%d,%d) KALMAN(%d,%d)", measPt.x, measPt.y, statePt.x, statePt.y);
// }
//imshow("mi img", img);
//imshow("Kalman", kalman_img);
kalman_centroid.x = statePt.x;
kalman_centroid.y = statePt.y;
return kalman_centroid;
}
Example 6: statePt
vector<Point> Widget::findKalmanCenters(vector<Point> dataInput){
vector<Point> kalmanCenters;
if(dataInput.empty()){
errorMsg("Nao ha objetos a serem detectados!");
return kalmanCenters;
}
kalmanCenters.resize(dataInput.size());
KalmanFilter *KF;
for(unsigned int i = 0; i < targets.size(); i++){
KF = targets[i]->KF;
//just converting between the data structures used
Mat_<float> measurement(2,1);
measurement(0) = dataInput[i].x;
measurement(1) = dataInput[i].y;
Mat estimated;
Mat prediction = KF->predict();
if(detectionSucess[i] == true)
{
//if the detection was successful, we feed the new measurement into the filter
//calling correct already adds the new measurement to the filter
estimated = KF->correct(measurement);
}
else{
/*if the measurement failed, we feed the filter back with its own
previous prediction*/
Mat_<float> predictedMeasurement(2,1);
predictedMeasurement(0) = prediction.at<float>(0);
predictedMeasurement(1) = prediction.at<float>(1);
estimated = KF->correct(predictedMeasurement);
KF->errorCovPre.copyTo(KF->errorCovPost);
//copy errorCovPre into errorCovPost [tip from a user on some forum]
}
Point statePt(estimated.at<float>(0),estimated.at<float>(1));
kalmanCenters[i] = statePt;
/*there is the center given by the prediction and the center the Kalman filter
believes to be the real one. The Kalman center is a weighting of the measurements and the model,
with prior knowledge of a random error*/
}
return kalmanCenters;
}
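The miss-handling above feeds the prediction back through correct() and then overwrites errorCovPost with errorCovPre, as the comments describe. A simpler alternative with the stock cv::KalmanFilter is to skip correct() entirely on a missed frame and promote the prediction to the posterior, so the uncertainty keeps growing until a real detection arrives; a hedged sketch of one such tracking step:

#include <opencv2/video/tracking.hpp>

// Sketch: one tracking step that tolerates a missed detection without
// feeding the prediction back through correct().
cv::Point stepTracker(cv::KalmanFilter &KF, const cv::Point *detection)
{
    cv::Mat prediction = KF.predict();
    if (detection != nullptr) {
        cv::Mat_<float> measurement(2, 1);
        measurement(0) = static_cast<float>(detection->x);
        measurement(1) = static_cast<float>(detection->y);
        cv::Mat estimated = KF.correct(measurement);
        return cv::Point(estimated.at<float>(0), estimated.at<float>(1));
    }
    // No detection: the prediction simply becomes the new posterior.
    KF.statePre.copyTo(KF.statePost);
    KF.errorCovPre.copyTo(KF.errorCovPost);
    return cv::Point(prediction.at<float>(0), prediction.at<float>(1));
}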
Example 7: LOGD
JNIEXPORT jlong JNICALL Java_org_opencv_video_KalmanFilter_predict_11
(JNIEnv* env, jclass , jlong self)
{
try {
LOGD("video::predict_11()");
KalmanFilter* me = (KalmanFilter*) self; //TODO: check for NULL
Mat _retval_ = me->predict( );
return (jlong) new Mat(_retval_);
} catch(cv::Exception e) {
LOGD("video::predict_11() catched cv::Exception: %s", e.what());
jclass je = env->FindClass("org/opencv/core/CvException");
if(!je) je = env->FindClass("java/lang/Exception");
env->ThrowNew(je, e.what());
return 0;
} catch (...) {
LOGD("video::predict_11() catched unknown exception (...)");
jclass je = env->FindClass("java/lang/Exception");
env->ThrowNew(je, "Unknown exception in JNI code {video::predict_11()}");
return 0;
}
}
Example 8: predictPt
vector<vector<Point> > Widget::predictFuture(int futureSize){
vector<vector<Point> > future;
future.resize(targets.size());
for(unsigned int i = 0; i < targets.size(); i++)
{
KalmanFilter *KF;
KF = targets[i]->KF;
KalmanFilter DelfusOracle = myMath::copyKF(*KF);
/*to look into the future we copy the current filter state and
feed it back with its own predictions a fixed number of times*/
for(int j = 0; j < futureSize; j++)
//COMPUTE FUTURE POINTS
{
Mat prediction = DelfusOracle.predict();
Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
future[i].push_back(predictPt);
Mat_<float> predictedMeasurement(2,1);
predictedMeasurement(0) = prediction.at<float>(0);
predictedMeasurement(1) = prediction.at<float>(1);
DelfusOracle.correct(predictedMeasurement);
DelfusOracle.errorCovPre.copyTo(DelfusOracle.errorCovPost);
//we copy errorCovPre into errorCovPost, following the forum tip
//Vidal does not like this tip; he says it fools the filter
}
}//end for each color
return future;
}
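The lookahead above clones the filter (myMath::copyKF, not shown here) and feeds it its own predictions. With a linear model, the same k-step position forecast can be obtained by applying the transition matrix repeatedly to the current posterior, without any fake corrections; a minimal sketch assuming the first two state components are x and y:

#include <opencv2/video/tracking.hpp>
#include <vector>

// Sketch: k-step-ahead positions computed by iterating x_{k+1} = A * x_k.
std::vector<cv::Point> predictFutureLinear(const cv::KalmanFilter &KF, int futureSize)
{
    std::vector<cv::Point> future;
    cv::Mat state = KF.statePost.clone();        // start from the current estimate
    for (int j = 0; j < futureSize; ++j) {
        state = KF.transitionMatrix * state;     // propagate the mean only
        future.push_back(cv::Point(state.at<float>(0), state.at<float>(1)));
    }
    return future;
}

This propagates only the mean; if the future uncertainty is also needed, the covariance can be rolled forward with P = A * P * A^T + Q inside the same loop.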
Example 9: kalmanPredict
Point kalmanPredict()
{
Mat prediction = KF.predict();
Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
return predictPt;
}
Example 10: detectAndTrackFace
//......... (part of the code omitted here) .........
{
float covNorm = sqrt(
pow(KFTracker.errorCovPost.at<float>(0,0), 2) +
pow(KFTracker.errorCovPost.at<float>(1,1), 2)
);
if (!isFaceInCurrentFrame)
{
stateCounter++;
}
else
{
stateCounter = 0;
trackingState = STATE_TRACK;
}
if ((stateCounter > minRejectFrames) && (covNorm > maxRejectCov))
{
trackingState = STATE_LOST;
stateCounter = 0;
resetKalmanFilter();
reset();
}
}
if ((trackingState == STATE_TRACK) || (trackingState == STATE_REJECT))
{
bool updateFaceHist = false;
// This is important:
KFTracker.transitionMatrix.at<float>(0,2) = dt;
KFTracker.transitionMatrix.at<float>(1,3) = dt;
Mat pred = KFTracker.predict();
if (isFaceInCurrentFrame)
{
//std::cout << vecAvgComp.size() << " detections in image " << std::endl;
float minCovNorm = 1e24;
int i = 0;
for( vector<CvAvgComp>::const_iterator rr = vecAvgComp.begin(); rr != vecAvgComp.end(); rr++, i++ )
{
copyKalman(KFTracker, MLSearch);
CvRect r = rr->rect;
r.x += searchROI.x;
r.y += searchROI.y;
double nr = rr->neighbors;
Point center;
Scalar color = colors[i%8];
float normFaceScore = 1.0 - (nr / 40.0);
if (normFaceScore > 1.0) normFaceScore = 1.0;
if (normFaceScore < 0.0) normFaceScore = 0.0;
setIdentity(MLSearch.measurementNoiseCov, Scalar_<float>::all(normFaceScore));
center.x = cvRound(r.x + r.width*0.5);
center.y = cvRound(r.y + r.height*0.5);
measurement.at<float>(0) = r.x;
measurement.at<float>(1) = r.y;
measurement.at<float>(2) = r.width;
measurement.at<float>(3) = r.height;
MLSearch.correct(measurement);
float covNorm = sqrt(
//......... (rest of the code omitted here) .........
Example 11: q
void *thread_sensor(void* arg)
{
//setup();
/*
mpu6050.Init();
measurement.setTo(Scalar(0));
kalman.transitionMatrix =
*(Mat_<float>(4, 4) <<
1, 0, 1, 0,
0, 1, 0, 1,
0, 0, 1, 0,
0, 0, 0, 1);
kalman.statePre.at<float>(0) = mpu6050.accelX();
kalman.statePre.at<float>(1) = mpu6050.accelY();
kalman.statePre.at<float>(2) = mpu6050.accelZ();
kalman.statePre.at<float>(3) = 0.0;
setIdentity(kalman.measurementMatrix);
setIdentity(kalman.processNoiseCov, Scalar::all(1e-4));
setIdentity(kalman.measurementNoiseCov, Scalar::all(10));
setIdentity(kalman.errorCovPost, Scalar::all(.1));
*/
//double x, y, z;
//double xSpeed = 0, ySpeed = 0, zSpeed = 0;
//double X = 0, Y = 0, Z = 0;
Quaternion q; // [w, x, y, z] quaternion container
while (1)
{
kalman.predict();
/*
x = mpu6050.accelX();
y = mpu6050.accelY();
z = mpu6050.accelZ();
measurement(0) = x;
measurement(1) = y;
measurement(2) = z;
*/
readFIFO();
// Calcurate Gravity and Yaw, Pitch, Roll
mpu.dmpGetQuaternion(&q, fifoBuffer);
mpu.dmpGetAccel(&accelRaw, fifoBuffer);
mpu.dmpGetGravity(&gravity, &q);
mpu.dmpGetLinearAccel(&accelReal, &accelRaw, &gravity);
mpu.dmpGetLinearAccelInWorld(&accelWorld, &accelReal, &q);
measurement(0) = accelWorld.x;
measurement(1) = accelWorld.y;
measurement(2) = accelWorld.z;
estimated = kalman.correct(measurement);
/*
xSpeed += estimated.at<float>(0) * INTERVAL / 1000; // speed m/s
ySpeed += estimated.at<float>(1) * INTERVAL / 1000;
zSpeed += estimated.at<float>(2) * INTERVAL / 1000;
X += xSpeed * INTERVAL / 1000; // speed * INTERVAL / 1000 * 1000 displacement in mm
Y += ySpeed * INTERVAL / 1000;
Z += zSpeed * INTERVAL / 1000;
Quaternion q(0.0, x, y, z);
VectorFloat vector[3];
GetGravity(vector, &q);
float ypr[3];
GetYawPitchRoll(ypr, &q, vector);
//mpu6050.Next();
*/
printf("%8.5f, %8.5f, %8.5f\n",
estimated.at<float>(0), estimated.at<float>(1), estimated.at<float>(2));
usleep(1000 * INTERVAL);
}
return NULL;
}
Example 12: faceNormalize
Mat faceNormalize(Mat face, int desiredFaceWidth, bool &sucess) {
int im_height = face.rows;
Mat faceProcessed;
cv::resize(face, face, Size(400, 400), 1.0, 1.0, INTER_CUBIC);
const float EYE_SX = 0.16f;
const float EYE_SY = 0.26f;
const float EYE_SW = 0.30f;
const float EYE_SH = 0.28f;
int leftX = cvRound(face.cols * EYE_SX);
int topY = cvRound(face.rows * EYE_SY);
int widthX = cvRound(face.cols * EYE_SW);
int heightY = cvRound(face.rows * EYE_SH);
int rightX = cvRound(face.cols * (1.0-EYE_SX-EYE_SW) );
Mat topLeftOfFace = face(Rect(leftX, topY, widthX, heightY));
Mat topRightOfFace = face(Rect(rightX, topY, widthX, heightY));
CascadeClassifier haar_cascade;
haar_cascade.load(PATH_CASCADE_LEFTEYE);
vector< Rect_<int> > detectedRightEye;
vector< Rect_<int> > detectedLeftEye;
Point leftEye = Point(-1, -1), rightEye = Point(-1, -1);
// Find the left eye:
haar_cascade.detectMultiScale(topLeftOfFace, detectedLeftEye);
for(int i = 0; i < detectedLeftEye.size(); i++) {
Rect eye_i = detectedLeftEye[i];
eye_i.x += leftX;
eye_i.y += topY;
leftEye = Point(eye_i.x + eye_i.width/2, eye_i.y + eye_i.height/2);
}
// If cascade fails, try another
if(detectedLeftEye.empty()) {
haar_cascade.load(PATH_CASCADE_BOTHEYES);
haar_cascade.detectMultiScale(topLeftOfFace, detectedLeftEye);
for(int i = 0; i < detectedLeftEye.size(); i++) {
Rect eye_i = detectedLeftEye[i];
eye_i.x += leftX;
eye_i.y += topY;
leftEye = Point(eye_i.x + eye_i.width/2, eye_i.y + eye_i.height/2);
}
}
//Find the right eye
haar_cascade.load(PATH_CASCADE_RIGHTEYE);
haar_cascade.detectMultiScale(topRightOfFace, detectedRightEye);
for(int i = 0; i < detectedRightEye.size(); i++) {
Rect eye_i = detectedRightEye[i];
eye_i.x += rightX;
eye_i.y += topY;
rightEye = Point(eye_i.x + eye_i.width/2, eye_i.y + eye_i.height/2);
}
// If cascade fails, try another
if(detectedRightEye.empty()) {
haar_cascade.load(PATH_CASCADE_BOTHEYES);
haar_cascade.detectMultiScale(topRightOfFace, detectedRightEye);
for(int i = 0; i < detectedRightEye.size(); i++) {
Rect eye_i = detectedRightEye[i];
eye_i.x += rightX;
eye_i.y += topY;
rightEye = Point(eye_i.x + eye_i.width/2, eye_i.y + eye_i.height/2);
}
}
// Initialization of the Kalman filters
if(initiKalmanFilter && leftEye.x >= 0 && rightEye.x >= 0) {
KFrightEye.statePre.at<float>(0) = rightEye.x;
KFrightEye.statePre.at<float>(1) = rightEye.y;
KFrightEye.statePre.at<float>(2) = 0;
KFrightEye.statePre.at<float>(3) = 0;
KFrightEye.transitionMatrix = (Mat_<float>(4, 4) << 1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1);
setIdentity(KFrightEye.measurementMatrix);
setIdentity(KFrightEye.processNoiseCov, Scalar::all(1e-4));
setIdentity(KFrightEye.measurementNoiseCov, Scalar::all(1e-1));
setIdentity(KFrightEye.errorCovPost, Scalar::all(.1));
KFleftEye.statePre.at<float>(0) = leftEye.x;
KFleftEye.statePre.at<float>(1) = leftEye.y;
KFleftEye.statePre.at<float>(2) = 0;
KFleftEye.statePre.at<float>(3) = 0;
KFleftEye.transitionMatrix = (Mat_<float>(4, 4) << 1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1);
setIdentity(KFleftEye.measurementMatrix);
setIdentity(KFleftEye.processNoiseCov, Scalar::all(1e-4));
setIdentity(KFleftEye.measurementNoiseCov, Scalar::all(1e-1));
setIdentity(KFleftEye.errorCovPost, Scalar::all(.1));
initiKalmanFilter = false;
}
// Prediction and correction of the Kalman filters
if(!initiKalmanFilter && leftEye.x >= 0) {
Mat prediction = KFleftEye.predict();
Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
measurement(0) = leftEye.x;
measurement(1) = leftEye.y;
Point measPt(measurement(0),measurement(1));
Mat estimated = KFleftEye.correct(measurement);
Point statePt(estimated.at<float>(0),estimated.at<float>(1));
leftEyePredict.x = statePt.x;
leftEyePredict.y = statePt.y;
//......... (part of the code omitted here) .........
Example 13: main
//......... (part of the code omitted here) .........
float accelZMean = accelZ - accelMeans[2];
float gyroX = gyroData[i][0] - gyroMeans[0];
float gyroY = gyroData[i][1] - gyroMeans[1];
float gyroZ = gyroData[i][2] - gyroMeans[2];
float magX = magData[i][0];
float magY = magData[i][1];
float magZ = magData[i][2];
float pX = 0.0f;
float pY = 0.0f;
float pZ = 0.0f;
float vX = lastMeasurement[3] + (accelXMean * dt);
float vY = lastMeasurement[4] + (accelYMean * dt);
float vZ = lastMeasurement[5] + (accelZMean * dt);
float accelAngleZ = accelZ + (1.0f - accelMeans[2]);
// Calculate attitude_x from y and z component of gravity vector
float aX = atan2(accelY, accelAngleZ) * (180.0f / 3.14f);
// Calculate attitude_y from x and z component of gravity vector
float aY = atan2(accelX, accelAngleZ) * (180.0f / 3.14f);
// Calculate attitude_z from x and z component of mag reading
// (probably will not be accurate)
float aZ = atan2(magX, magZ) * (180.0f / 3.14f);
accelAnglesFile << aX << ", " << aY << ", " << aZ << endl;
float measurement[12] =
{
aX,
aY,
aZ,
pX,
pY,
pZ,
vX,
vY,
vZ,
0.0f,
0.0f,
0.0f
};
float measurement6[6] =
{
aX,
aY,
aZ,
pX,
pY,
pZ
};
kalmanFilter.getUMatrix() << gyroX, gyroY, gyroZ;
kalmanFilter6.getUMatrix() << gyroX, gyroY, gyroZ;
// Predict
kalmanFilter.predict();
kalmanFilter6.predict();
kalmanFilter.update(measurement);
kalmanFilter6.update(measurement6);
filteredStatesFile <<
kalmanFilter.getXMatrix()(0, 0) << ", " <<
kalmanFilter.getXMatrix()(1, 0) << ", " <<
kalmanFilter.getXMatrix()(2, 0) << ", " <<
kalmanFilter.getXMatrix()(3, 0) << ", " <<
kalmanFilter.getXMatrix()(4, 0) << ", " <<
kalmanFilter.getXMatrix()(5, 0) << ", " <<
kalmanFilter.getXMatrix()(6, 0) << ", " <<
kalmanFilter.getXMatrix()(7, 0) << ", " <<
kalmanFilter.getXMatrix()(8, 0) << ", " <<
kalmanFilter.getXMatrix()(9, 0) << ", " <<
kalmanFilter.getXMatrix()(10, 0) << ", " <<
kalmanFilter.getXMatrix()(11, 0);
filteredStatesFile << endl;
filteredStatesFile6 <<
kalmanFilter6.getXMatrix()(0, 0) << ", " <<
kalmanFilter6.getXMatrix()(1, 0) << ", " <<
kalmanFilter6.getXMatrix()(2, 0) << ", " <<
kalmanFilter6.getXMatrix()(3, 0) << ", " <<
kalmanFilter6.getXMatrix()(4, 0) << ", " <<
kalmanFilter6.getXMatrix()(5, 0);
filteredStatesFile6 << endl;
memcpy(lastMeasurement, measurement, 12 * 4); //store this measurement for the next iteration
memcpy(lastMeasurement6, measurement6, 6 * 4);
}
filteredStatesFile.close();
filteredStatesFile6.close();
accelAnglesFile.close();
return 0;
}
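Example 13 feeds the gyro rates into its filter as a control input (the U matrix) before calling predict. OpenCV's cv::KalmanFilter supports the same pattern: construct it with a non-zero controlParams count, fill controlMatrix, and pass the control vector to predict(), which then computes statePre = transitionMatrix * statePost + controlMatrix * control. A small hedged sketch with a one-state tilt filter; all numbers are illustrative and the sensor samples are faked.

#include <opencv2/video/tracking.hpp>
#include <iostream>

int main()
{
    // 1 state (angle), 1 measurement (accelerometer-derived angle), 1 control (gyro rate).
    cv::KalmanFilter KF(1, 1, 1);
    const float dt = 0.01f;

    KF.transitionMatrix = (cv::Mat_<float>(1, 1) << 1);
    KF.controlMatrix    = (cv::Mat_<float>(1, 1) << dt);    // angle += gyroRate * dt
    cv::setIdentity(KF.measurementMatrix);
    cv::setIdentity(KF.processNoiseCov, cv::Scalar::all(1e-4));
    cv::setIdentity(KF.measurementNoiseCov, cv::Scalar::all(1e-2));
    cv::setIdentity(KF.errorCovPost, cv::Scalar::all(1.0));

    cv::Mat_<float> u(1, 1), z(1, 1);
    for (int i = 0; i < 100; ++i)
    {
        u(0) = 0.2f;          // gyro rate sample (illustrative)
        z(0) = 0.002f * i;    // accelerometer angle sample (illustrative)

        // predict(control) computes statePre = A * statePost + B * u.
        KF.predict(u);
        cv::Mat estimated = KF.correct(z);
        std::cout << estimated.at<float>(0) << std::endl;
    }
    return 0;
}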