This article compiles typical usage examples of the C++ Mat_::clone method. If you have been wondering what exactly C++ Mat_::clone does, how to use it, or where to find examples of it, the curated code examples here may help. You can also read further usage examples of the class Mat_ to which this method belongs.
Below are 15 code examples of the Mat_::clone method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
Example 1: SimilarityTransform
// Modifies rotation and scale by reference so that the difference between shape1 and shape2 is minimized
void SimilarityTransform(const Mat_<double>& shape1, const Mat_<double>& shape2, Mat_<double>& rotation, double& scale) {
rotation = Mat::zeros(2, 2, CV_64FC1);
scale = 0;
// center the data
double center_x_1 = 0;
double center_y_1 = 0;
double center_x_2 = 0;
double center_y_2 = 0;
for (int i = 0; i < shape1.rows; i++) {
center_x_1 += shape1(i, 0);
center_y_1 += shape1(i, 1);
center_x_2 += shape2(i, 0);
center_y_2 += shape2(i, 1);
}
center_x_1 /= shape1.rows;
center_y_1 /= shape1.rows;
center_x_2 /= shape2.rows;
center_y_2 /= shape2.rows;
Mat_<double> temp1 = shape1.clone();
Mat_<double> temp2 = shape2.clone();
for (int i = 0; i < shape1.rows; i++) {
temp1(i, 0) -= center_x_1;
temp1(i, 1) -= center_y_1;
temp2(i, 0) -= center_x_2;
temp2(i, 1) -= center_y_2;
}
// At this point temp1 (same layout as shape1) and temp2 (same layout as shape2) have been moved to a coordinate system with the origin at (0,0)
Mat_<double> covariance1, covariance2;
Mat_<double> mean1, mean2;
// calculate covariance matrix
calcCovarMatrix(temp1, covariance1, mean1, CV_COVAR_COLS); // covariance1 receives the covariance of temp1, mean1 its mean
calcCovarMatrix(temp2, covariance2, mean2, CV_COVAR_COLS);
double s1 = sqrt(norm(covariance1)); // norm computes the L2 norm of covariance1
double s2 = sqrt(norm(covariance2));
scale = s1 / s2;
temp1 = 1.0 / s1 * temp1;
temp2 = 1.0 / s2 * temp2;
// After this scaling, the size difference between temp1 and temp2 is minimized (essentially a least-squares normalization)
double num = 0;
double den = 0;
for (int i = 0; i < shape1.rows; i++) {
num = num + temp1(i, 1) * temp2(i, 0) - temp1(i, 0) * temp2(i, 1);
den = den + temp1(i, 0) * temp2(i, 0) + temp1(i, 1) * temp2(i, 1);
}
double norm = sqrt(num * num + den * den);
double sin_theta = num / norm;
double cos_theta = den / norm;
rotation(0, 0) = cos_theta;
rotation(0, 1) = -sin_theta;
rotation(1, 0) = sin_theta;
rotation(1, 1) = cos_theta;
}
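As a quick sanity check for Example 1, the short program below is a sketch: the three-point shapes, the factor 2.0 and the 30-degree angle are made-up illustration data, and the SimilarityTransform definition from Example 1 is assumed to be compiled in the same file. It builds shape1 as a scaled, rotated copy of shape2 and prints the recovered scale, which should come out close to 2.

#include <cmath>
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

// Declaration of the function defined in Example 1.
void SimilarityTransform(const Mat_<double>& shape1, const Mat_<double>& shape2,
                         Mat_<double>& rotation, double& scale);

int main() {
    // shape2: three landmarks, one (x, y) pair per row (made-up data)
    Mat_<double> shape2 = (Mat_<double>(3, 2) << 0, 0, 10, 0, 0, 10);
    // shape1: shape2 scaled by 2 and rotated by 30 degrees
    double theta = CV_PI / 6.0, k = 2.0;
    Mat_<double> shape1(3, 2);
    for (int i = 0; i < shape2.rows; i++) {
        shape1(i, 0) = k * (cos(theta) * shape2(i, 0) - sin(theta) * shape2(i, 1));
        shape1(i, 1) = k * (sin(theta) * shape2(i, 0) + cos(theta) * shape2(i, 1));
    }
    Mat_<double> rotation;
    double scale = 0;
    SimilarityTransform(shape1, shape2, rotation, scale);
    cout << "recovered scale (expected about " << k << "): " << scale << endl;
    cout << "recovered rotation:" << endl << rotation << endl;
    return 0;
}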
Example 2: SimilarityTransform
void SimilarityTransform(const Mat_<double>& shape1, const Mat_<double>& shape2,
Mat_<double>& rotation,double& scale){
rotation = Mat::zeros(2,2,CV_64FC1);
scale = 0;
// center the data
double center_x_1 = 0;
double center_y_1 = 0;
double center_x_2 = 0;
double center_y_2 = 0;
for(int i = 0;i < shape1.rows;i++){
center_x_1 += shape1(i,0);
center_y_1 += shape1(i,1);
center_x_2 += shape2(i,0);
center_y_2 += shape2(i,1);
}
center_x_1 /= shape1.rows;
center_y_1 /= shape1.rows;
center_x_2 /= shape2.rows;
center_y_2 /= shape2.rows;
Mat_<double> temp1 = shape1.clone();
Mat_<double> temp2 = shape2.clone();
for(int i = 0;i < shape1.rows;i++){
temp1(i,0) -= center_x_1;
temp1(i,1) -= center_y_1;
temp2(i,0) -= center_x_2;
temp2(i,1) -= center_y_2;
}
Mat_<double> covariance1, covariance2;
Mat_<double> mean1,mean2;
// calculate covariance matrix
calcCovarMatrix(temp1,covariance1,mean1,CV_COVAR_COLS);
calcCovarMatrix(temp2,covariance2,mean2,CV_COVAR_COLS);
double s1 = sqrt(norm(covariance1));
double s2 = sqrt(norm(covariance2));
scale = s1 / s2;
temp1 = 1.0 / s1 * temp1;
temp2 = 1.0 / s2 * temp2;
double num = 0;
double den = 0;
for(int i = 0;i < shape1.rows;i++){
num = num + temp1(i,1) * temp2(i,0) - temp1(i,0) * temp2(i,1);
den = den + temp1(i,0) * temp2(i,0) + temp1(i,1) * temp2(i,1);
}
double norm = sqrt(num*num + den*den);
double sin_theta = num / norm;
double cos_theta = den / norm;
rotation(0,0) = cos_theta;
rotation(0,1) = -sin_theta;
rotation(1,0) = sin_theta;
rotation(1,1) = cos_theta;
}
Example 3: similarityTransform
void similarityTransform(const Mat_<double> &fromShape,
const Mat_<double> &toShape,
Mat_<double> &rotation, double &scale)
{
rotation = Mat::zeros(2, 2, CV_64FC1);
scale = 0;
assert(fromShape.rows == toShape.rows);
int rows = fromShape.rows;
double fcx = 0, fcy = 0, tcx = 0, tcy = 0;
for (int i = 0; i < rows; i++) {
fcx += fromShape(i, 0);
fcy += fromShape(i, 1);
tcx += toShape(i, 0);
tcy += toShape(i, 1);
}
fcx /= rows;
fcy /= rows;
tcx /= rows;
tcy /= rows;
Mat_<double> ftmp = fromShape.clone();
Mat_<double> ttmp = toShape.clone();
for (int i = 0; i < rows; i++) {
ftmp(i, 0) -= fcx;
ftmp(i, 1) -= fcy;
ttmp(i, 0) -= tcx;
ttmp(i, 1) -= tcy;
}
Mat_<double> fcov, tcov;
Mat_<double> fmean, tmean;
calcCovarMatrix(ftmp, fcov, fmean, CV_COVAR_COLS);
calcCovarMatrix(ttmp, tcov, tmean, CV_COVAR_COLS);
double fsize = sqrtf(norm(fcov));
double tsize = sqrtf(norm(tcov));
scale = tsize / fsize;
ftmp /= fsize;
ttmp /= tsize;
double num = 0, den = 0;
// an efficient way to calculate rotation, using cross and dot products
for (int i = 0; i < rows; i++) {
num += ftmp(i, 1)*ttmp(i, 0) - ftmp(i, 0)*ttmp(i, 1);
den += ftmp(i, 0)*ttmp(i, 0) + ftmp(i, 1)*ttmp(i, 1);
}
double norm = sqrtf(num*num + den*den);
// theta is the clockwise rotation angle (fromShape -> toShape)
double sinTheta = num / norm;
double cosTheta = den / norm;
rotation(0, 0) = cosTheta;
rotation(0, 1) = sinTheta;
rotation(1, 0) = -sinTheta;
rotation(1, 1) = cosTheta;
}
Example 4: SimilarityTransform
void SimilarityTransform(const Mat_<float>& shape1, const Mat_<float>& shape2,
Mat_<float>& rotation,float& scale){
rotation = Mat::zeros(2,2,CV_32FC1);
scale = 0;
// center the data
float center_x_1 = 0;
float center_y_1 = 0;
float center_x_2 = 0;
float center_y_2 = 0;
for(int i = 0;i < shape1.rows;i++){
center_x_1 += shape1(i,0);
center_y_1 += shape1(i,1);
center_x_2 += shape2(i,0);
center_y_2 += shape2(i,1);
}
center_x_1 /= shape1.rows;
center_y_1 /= shape1.rows;
center_x_2 /= shape2.rows;
center_y_2 /= shape2.rows;
Mat_<float> temp1 = shape1.clone();
Mat_<float> temp2 = shape2.clone();
for(int i = 0;i < shape1.rows;i++){
temp1(i,0) -= center_x_1;
temp1(i,1) -= center_y_1;
temp2(i,0) -= center_x_2;
temp2(i,1) -= center_y_2;
}
Mat_<float> covariance1, covariance2;
Mat_<float> mean1,mean2;
// calculate covariance matrix
calcCovarMatrix(temp1,covariance1,mean1,CV_COVAR_SCALE|CV_COVAR_ROWS|CV_COVAR_NORMAL,CV_32F);
calcCovarMatrix(temp2,covariance2,mean2,CV_COVAR_SCALE|CV_COVAR_ROWS|CV_COVAR_NORMAL,CV_32F);
//cout<<covariance1<<endl;
//cout<<covariance2<<endl;
float s1 = sqrt(norm(covariance1));
float s2 = sqrt(norm(covariance2));
scale = s1 / s2;
temp1 = 1.0 / s1 * temp1;
temp2 = 1.0 / s2 * temp2;
float num = 0;
float den = 0;
for(int i = 0;i < shape1.rows;i++){
num = num + temp1(i,1) * temp2(i,0) - temp1(i,0) * temp2(i,1);
den = den + temp1(i,0) * temp2(i,0) + temp1(i,1) * temp2(i,1);
}
float norm = sqrt(num*num + den*den);
float sin_theta = num / norm;
float cos_theta = den / norm;
rotation(0,0) = cos_theta;
rotation(0,1) = -sin_theta;
rotation(1,0) = sin_theta;
rotation(1,1) = cos_theta;
}
Example 5: amplify_spatial_lpyr_temporal_ideal
void Evm::amplify_spatial_lpyr_temporal_ideal(const cv::Mat &src, std::vector<cv::Mat_<cv::Vec3f> > &lapPyr, cv::Mat &dst, float alpha, float lambda_c, float fl, float fh, float samplingRate, float chromAttenuation)
{
Mat_<Vec3f> s = src.clone();
rgb2ntsc(src, s);
buildLaplacianPyramid(s, lapPyr);
if (is_not_first_frame) {
//temporal ideal
for (int i=0; i<lapPyr.size(); i++) {
//TODO:implement temporal ideal filter
//
}
//amplify
// amplifyByAlpha(src, alpha, lambda_c);
} else {
lowpass1 = lapPyr;
lowpass2 = lapPyr;
filtered = lapPyr;
is_not_first_frame = true;
}
dst = s + reconstructImgFromLapPyramid(filtered).mul(Vec3f(1,chromAttenuation,chromAttenuation));
s = dst.clone();
ntsc2rgb(s, s);
dst = s.clone();
}
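The loop in Example 5 is left as a TODO. A true "ideal" temporal band-pass buffers a window of frames and filters each pixel's time series in the frequency domain; as a lightweight, clearly approximate stand-in, the first-order IIR band-pass shown in Example 6 below can be dropped into that loop. The sketch is not the original author's implementation, and the mapping of fl, fh and samplingRate to the coefficients r1 and r2 is an assumption for illustration only.

// Sketch of a loop body for the TODO in Example 5, reusing the IIR band-pass idea
// from Example 6 (assumption: r1/r2 derived from the cutoff frequencies and sampling rate).
float r1 = 2.0f * fh / samplingRate;   // faster low-pass (upper cutoff fh)
float r2 = 2.0f * fl / samplingRate;   // slower low-pass (lower cutoff fl)
for (size_t i = 0; i < lapPyr.size(); i++) {
    Mat temp1 = (1 - r1) * lowpass1[i] + r1 * lapPyr[i];
    Mat temp2 = (1 - r2) * lowpass2[i] + r2 * lapPyr[i];
    lowpass1[i] = temp1;
    lowpass2[i] = temp2;
    filtered[i] = temp1 - temp2;       // band-pass as the difference of two low-passes
}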
Example 6: amplify_spatial_lpyr_temporal_iir
void Evm::amplify_spatial_lpyr_temporal_iir(const Mat& src, vector<Mat_<Vec3f> >& lapPyr, Mat& dst,
float alpha, float lambda_c, float r1, float r2, float chromAttenuation)
{
Mat_<Vec3f> s = src.clone();
rgb2ntsc(src, s);
buildLaplacianPyramid(s, lapPyr);
if (is_not_first_frame) {
//temporal iir
for (int i=0; i<lapPyr.size(); i++) {
Mat temp1 = (1-r1)*lowpass1[i] + r1*lapPyr[i];
Mat temp2 = (1-r2)*lowpass2[i] + r2*lapPyr[i];
lowpass1[i] = temp1;
lowpass2[i] = temp2;
filtered[i] = temp1 - temp2;
}
//amplify
amplifyByAlpha(src, alpha, lambda_c);
} else { // first frame
lowpass1 = lapPyr;
lowpass2 = lapPyr;
filtered = lapPyr;
is_not_first_frame = true;
}
dst = s + reconstructImgFromLapPyramid(filtered).mul(Vec3f(1,chromAttenuation,chromAttenuation));
s = dst.clone();
ntsc2rgb(s, s);
dst = s.clone();
}
Example 7: fft2
void fft2(Mat_<float> src)
{
int x = getOptimalDFTSize(2* src.rows );
int y = getOptimalDFTSize(2* src.cols );
copyMakeBorder(src, src, 0, (x - src.rows), 0, (y - src.cols), BORDER_CONSTANT, Scalar::all(0));
// Get padded image size
const int wx = src.cols, wy = src.rows;
const int cx = wx/2, cy = wy/2;
//--------------------------------//
// DFT - performing //
cv::Mat_<float> imgs[] = {src.clone(), Mat::zeros(src.size(), CV_32F)};
cv::Mat_<cv::Vec2f> img_dft;
merge(imgs,2,img_dft);
dft(img_dft, img_dft);
split(img_dft,imgs);
cv::Mat_<float> magnitude, phase;
cartToPolar(imgs[0],imgs[1],magnitude,phase);
dftshift(magnitude);
magnitude = magnitude + 1.0f;
log(magnitude,magnitude);
normalize(magnitude,magnitude,0,1,CV_MINMAX);
namedWindow("img_dft",WINDOW_NORMAL);
imshow("img_dft",magnitude);
waitKey(0);
cout << "out" << endl;
}
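A minimal driver for Example 7 is sketched below; the image path is a placeholder, and the definitions of fft2 above and its dftshift helper (not shown in this article) are assumed to be available in the same translation unit.

#include <opencv2/opencv.hpp>
using namespace cv;

void fft2(Mat_<float> src);   // defined in Example 7

int main() {
    // "input.png" is a placeholder file name used for illustration
    Mat img = imread("input.png", IMREAD_GRAYSCALE);
    if (img.empty()) return -1;
    Mat_<float> f;
    img.convertTo(f, CV_32F, 1.0 / 255.0);  // scale to [0, 1] before the DFT
    fft2(f);                                // displays the shifted log-magnitude spectrum
    return 0;
}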
Example 8: extract_rigid_points
// Pick only the more stable/rigid points under changes of expression
void extract_rigid_points(Mat_<double>& source_points, Mat_<double>& destination_points)
{
if(source_points.rows == 68)
{
Mat_<double> tmp_source = source_points.clone();
source_points = Mat_<double>();
// Push back the rigid points (some face outline, eyes, and nose)
source_points.push_back(tmp_source.row(0));
source_points.push_back(tmp_source.row(2));
source_points.push_back(tmp_source.row(14));
source_points.push_back(tmp_source.row(16));
source_points.push_back(tmp_source.row(36));
source_points.push_back(tmp_source.row(39));
source_points.push_back(tmp_source.row(43));
source_points.push_back(tmp_source.row(38));
source_points.push_back(tmp_source.row(42));
source_points.push_back(tmp_source.row(45));
source_points.push_back(tmp_source.row(31));
source_points.push_back(tmp_source.row(33));
source_points.push_back(tmp_source.row(35));
Mat_<double> tmp_dest = destination_points.clone();
destination_points = Mat_<double>();
// Push back the rigid points
destination_points.push_back(tmp_dest.row(0));
destination_points.push_back(tmp_dest.row(2));
destination_points.push_back(tmp_dest.row(14));
destination_points.push_back(tmp_dest.row(16));
destination_points.push_back(tmp_dest.row(36));
destination_points.push_back(tmp_dest.row(39));
destination_points.push_back(tmp_dest.row(43));
destination_points.push_back(tmp_dest.row(38));
destination_points.push_back(tmp_dest.row(42));
destination_points.push_back(tmp_dest.row(45));
destination_points.push_back(tmp_dest.row(31));
destination_points.push_back(tmp_dest.row(33));
destination_points.push_back(tmp_dest.row(35));
}
}
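A brief calling sketch for Example 8, assuming the usual 68-landmark layout with one (x, y) pair per row; the zero-filled matrices below stand in for real tracker output.

// Hypothetical usage: both matrices are replaced in place by their 13 rigid points.
cv::Mat_<double> source_landmarks = cv::Mat_<double>::zeros(68, 2);
cv::Mat_<double> destination_landmarks = cv::Mat_<double>::zeros(68, 2);
// ... fill both matrices with detected landmark coordinates here ...
extract_rigid_points(source_landmarks, destination_landmarks);
// source_landmarks.rows and destination_landmarks.rows are now 13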
Example 9: calculate_tangent
// Propagates tangent directions from a region boundary into the region interior,
// ring by ring (a breadth-first "thinning" sweep).
// Note: the original listing declared two parameters both named "tangent"; here the
// constant input is renamed to initial_tangent and copied into the in/out parameter
// tangent, which is an assumption about the intended interface.
void calculate_tangent(const Mat& input_image, const Mat_<double>& initial_tangent,
        const Mat_<int>& boundary, const Mat_<int>& region_id, int current_region_id,
        Mat_<double>& tangent){
    int width = input_image.cols;
    int height = input_image.rows;
    tangent = initial_tangent.clone();
    Mat_<int> visited = Mat_<int>::zeros(height, width);
    Mat_<int> current_boundary = boundary.clone();
    int current_boundary_size = current_boundary.rows;
    // mark the seed boundary points as visited so they are not overwritten
    for(int i = 0; i < current_boundary_size; i++){
        visited(current_boundary(i, 1), current_boundary(i, 0)) = 1;
    }
    while(true){
        // find the new boundary produced by one thinning step
        Mat_<int> new_boundary(width * height, 2);
        int new_boundary_size = 0;
        for(int i = 0; i < current_boundary_size; i++){
            int x = current_boundary(i, 0);
            int y = current_boundary(i, 1);
            for(int j = -1; j < 2; j++){
                for(int k = -1; k < 2; k++){
                    int new_x = x + j;
                    int new_y = y + k;
                    if(new_x < 0 || new_y < 0 || new_x >= width || new_y >= height){
                        continue;
                    }
                    if(region_id(new_y, new_x) != current_region_id){
                        continue;
                    }
                    if(visited(new_y, new_x) == 1){
                        continue;
                    }
                    new_boundary(new_boundary_size, 0) = new_x;
                    new_boundary(new_boundary_size, 1) = new_y;
                    new_boundary_size = new_boundary_size + 1;
                    visited(new_y, new_x) = 1;
                    tangent(new_y, new_x) = tangent(y, x);
                }
            }
        }
        // if no new boundary points were found, exit
        if(new_boundary_size == 0){
            break;
        }
        // continue the propagation from the newly found ring
        current_boundary = new_boundary.rowRange(0, new_boundary_size).clone();
        current_boundary_size = new_boundary_size;
    }
}
Example 10:
Mat_<float> MotionDetection::SubstractAllPixels(float value, Mat_<float> frame)
{
Mat_<float> temp = frame.clone();
for(int i = 0; i < frame.cols; i++)
{
for(int j = 0; j < frame.rows; j++)
{
temp.at<float>(Point(i,j)) = value - frame.at<float>(Point(i,j));
}
}
return temp;
}
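The per-pixel loop in Example 10 can also be written as a single OpenCV matrix expression; the hypothetical helper below (not part of the original class) relies on the standard Scalar-minus-Mat overload and produces the same result.

// Hypothetical free function equivalent to Example 10's loop.
Mat_<float> SubstractAllPixelsExpr(float value, const Mat_<float>& frame)
{
    Mat_<float> temp = value - frame;   // per-pixel: value - frame(y, x)
    return temp;
}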
Example 11: Warp
//=============================================================================
// Warps from the source image to the destination image using the provided landmarks; used to determine if shape fitting converged successfully
void PAW::Warp(const Mat& image_to_warp, Mat& destination_image, const Mat_<double>& landmarks_to_warp)
{
// set the current shape
source_landmarks = landmarks_to_warp.clone();
// prepare the mapping coefficients using the current shape
this->CalcCoeff();
// Do the actual mapping computation (where to warp from)
this->WarpRegion(map_x, map_y);
// Do the actual warp (with bi-linear interpolation)
remap(image_to_warp, destination_image, map_x, map_y, CV_INTER_LINEAR);
}
Example 12: applyFilter
void applyFilter(Mat_<float> src, Mat_<float>& output) // output taken by reference so the filtered result reaches the caller
{
int wxOrig = src.cols;
int wyOrig = src.rows;
int m = cv::getOptimalDFTSize(2*wyOrig);
int n = cv::getOptimalDFTSize(2*wxOrig);
copyMakeBorder(src, src, 0, m - wyOrig, 0, n - wxOrig, cv::BORDER_CONSTANT, cv::Scalar::all(0));
const int wx = src.cols, wy = src.rows;
const int cx = wx/2, cy = wy/2;
std::cout << wxOrig << " " << wyOrig << std::endl;
std::cout << wx << " " << wy << std::endl;
std::cout << cx << " " << cy << std::endl;
cv::Mat_<float> imgs[] = {src.clone(), cv::Mat_<float>::zeros(wy, wx)};
cv::Mat_<cv::Vec2f> img_dft;
cv::merge(imgs, 2, img_dft);
cv::dft(img_dft, img_dft);
dftshift(img_dft);
cout << "helo " << endl;
cv::Mat hpf = BHPF(3000, 4, wy, wx, cx, cy);
cv::mulSpectrums(hpf, img_dft, img_dft, cv::DFT_ROWS);
dftshift(img_dft);
cv::idft(img_dft, img_dft); //the result is a 2 channel image
split(img_dft, imgs);
normalize(imgs[0], output, 0, 1, CV_MINMAX);
cv::Mat_<float> croppedOutput(output,cv::Rect(0,0,wxOrig,wyOrig));
output = croppedOutput;
namedWindow("High-pass filtered input",WINDOW_NORMAL);
namedWindow("Input",WINDOW_NORMAL);
cv::imshow("Input", src);
cv::imshow("High-pass filtered input", croppedOutput);
imwrite("/home/student/ROVI/VisMand1/build-vis_man_1-Desktop-Debug/output/out.jpg",croppedOutput);
cout << "lol" << endl;
cv::waitKey(0);
}
Example 13: VO_BasicAAMFitting
/**
* @author JIA Pei
* @version 2010-05-20
* @brief Basic AAM Fitting, for dynamic image sequence
* @param iImg Input - image to be fitted
* @param ioShape Input and Output - the fitted shape
* @param oImg Output - the fitted image
* @param epoch Input - the iteration epoch
*/
float VO_FittingAAMBasic::VO_BasicAAMFitting(const Mat& iImg,
VO_Shape& ioShape,
Mat& oImg,
unsigned int epoch)
{
this->m_VOFittingShape.clone(ioShape);
double t = (double)cvGetTickCount();
this->SetProcessingImage(iImg, this->m_VOAAMBasic);
this->m_iIteration = 0;
// Get m_MatModelAlignedShapeParam and m_fScale, m_vRotateAngles, m_MatCenterOfGravity
this->m_VOAAMBasic->VO_CalcAllParams4AnyShapeWithConstrain( this->m_VOFittingShape,
this->m_MatModelAlignedShapeParam,
this->m_fScale,
this->m_vRotateAngles,
this->m_MatCenterOfGravity);
this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);
// Get m_MatModelNormalizedTextureParam
VO_TextureModel::VO_LoadOneTextureFromShape(this->m_VOFittingShape,
this->m_ImageProcessing,
this->m_vTriangle2D,
this->m_vPointWarpInfo,
this->m_VOFittingTexture );
// estimate the texture model parameters
this->m_VOAAMBasic->VO_CalcAllParams4AnyTexture(this->m_VOFittingTexture, this->m_MatModelNormalizedTextureParam);
// Calculate m_MatCurrentC
this->m_VOAAMBasic->VO_SParamTParamProjectToCParam( this->m_MatModelAlignedShapeParam,
this->m_MatModelNormalizedTextureParam,
this->m_MatCurrentC );
// Set m_MatCurrentT, m_MatDeltaT, m_MatEstimatedT, m_MatDeltaC, m_MatEstimatedC, etc.
this->m_MatCurrentT = Mat_<float>::zeros(this->m_MatCurrentT.size());
this->m_MatDeltaT = Mat_<float>::zeros(this->m_MatDeltaT.size());
this->m_MatEstimatedT = Mat_<float>::zeros(this->m_MatEstimatedT.size());
this->m_MatDeltaC = Mat_<float>::zeros(this->m_MatDeltaC.size());
this->m_MatEstimatedC = Mat_<float>::zeros(this->m_MatEstimatedC.size());
//////////////////////////////////////////////////////////////////////////////////////////////////////
// explained by JIA Pei. 2010-05-20
// For the first round, this->m_VOFittingShape should not change after calling "VO_CParamTParam2FittingShape"
// But this is not the case. why?
// Before calling VO_CParamTParam2FittingShape, this->m_VOFittingShape is calculated by
// a) assigning m_VOTemplateAlignedShape
// b) align to the real-size face using detected eyes and mouth
// c) constrain the shape within the image
// d) constrain the shape parameters and calculate those rigid transform parameters
// cout << this->m_VOFittingShape << endl;
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Estimate m_VOFittingShape and m_VOFittingTexture
this->VO_CParamTParam2FittingShape( this->m_MatCurrentC,
this->m_MatCurrentT,
this->m_VOModelNormalizedTexture,
this->m_VOFittingShape,
this->m_fScale,
this->m_vRotateAngles,
this->m_MatCenterOfGravity );
this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing); // Remember to call ConstrainShapeInImage() whenever you update m_VOFittingShape
//////////////////////////////////////////////////////////////////////////////////////////////////////
// When calling VO_CParamTParam2FittingShape, this->m_VOFittingShape is calculated by
// a) c parameters to reconstruct shape parameters
// b) shape parameters to reconstruct shape
// c) align to the real-size face by global shape normalization
// cout << this->m_VOFittingShape << endl;
//////////////////////////////////////////////////////////////////////////////////////////////////////
this->m_E_previous = this->m_E = this->VO_CalcErrorImage(this->m_ImageProcessing,
this->m_VOFittingShape,
this->m_VOModelNormalizedTexture,
this->m_VOTextureError);
do
{
float estScale = this->m_fScale;
vector<float> estRotateAngles = this->m_vRotateAngles;
Mat_<float> estCOG = this->m_MatCenterOfGravity.clone();
bool cBetter = false;
bool poseBetter = false;
/**First shape parameters, c parameters. refer to equation (9.3)
* Cootes "Statistical Model of Appearance for Computer Vision" */
cv::gemm(this->m_VOTextureError.GetTheTextureInARow(), this->m_VOAAMBasic->m_MatRc, -1, Mat(), 0.0, this->m_MatDeltaC, GEMM_2_T);
// damp -- C
for(unsigned int i = 0; i < k_values.size(); i++)
{
// make damped c prediction
cv::scaleAdd(this->m_MatDeltaC, k_values[i], this->m_MatCurrentC, this->m_MatEstimatedC);
// make sure m_MatEstimatedC are constrained
//......... part of the code is omitted here .........
Example 14: initialize_simplex
static int initialize_simplex(Mat_<double>& c, Mat_<double>& b,double& v,vector<int>& N,vector<int>& B,vector<unsigned int>& indexToRow){
N.resize(c.cols);
N[0]=0;
for (std::vector<int>::iterator it = N.begin()+1 ; it != N.end(); ++it){
*it=it[-1]+1;
}
B.resize(b.rows);
B[0]=(int)N.size();
for (std::vector<int>::iterator it = B.begin()+1 ; it != B.end(); ++it){
*it=it[-1]+1;
}
indexToRow.resize(c.cols+b.rows);
indexToRow[0]=0;
for (std::vector<unsigned int>::iterator it = indexToRow.begin()+1 ; it != indexToRow.end(); ++it){
*it=it[-1]+1;
}
v=0;
int k=0;
{
double min=DBL_MAX;
for(int i=0;i<b.rows;i++){
if(b(i,b.cols-1)<min){
min=b(i,b.cols-1);
k=i;
}
}
}
if(b(k,b.cols-1)>=0){
N.erase(N.begin());
for (std::vector<unsigned int>::iterator it = indexToRow.begin()+1 ; it != indexToRow.end(); ++it){
--(*it);
}
return 0;
}
Mat_<double> old_c=c.clone();
c=0;
c(0,0)=-1;
for(int i=0;i<b.rows;i++){
b(i,0)=-1;
}
print_simplex_state(c,b,v,N,B);
dprintf(("\tWE MAKE PIVOT\n"));
pivot(c,b,v,N,B,k,0,indexToRow);
print_simplex_state(c,b,v,N,B);
inner_simplex(c,b,v,N,B,indexToRow);
dprintf(("\tAFTER INNER_SIMPLEX\n"));
print_simplex_state(c,b,v,N,B);
unsigned int nsize = (unsigned int)N.size();
if(indexToRow[0]>=nsize){
int iterator_offset=indexToRow[0]-nsize;
if(b(iterator_offset,b.cols-1)>0){
return SOLVELP_UNFEASIBLE;
}
pivot(c,b,v,N,B,iterator_offset,0,indexToRow);
}
vector<int>::iterator iterator;
{
int iterator_offset=indexToRow[0];
iterator=N.begin()+iterator_offset;
std::iter_swap(iterator,N.begin());
SWAP(int,indexToRow[*iterator],indexToRow[0]);
swap_columns(c,iterator_offset,0);
swap_columns(b,iterator_offset,0);
}
dprintf(("after swaps\n"));
print_simplex_state(c,b,v,N,B);
//start from 1, because we ignore x_0
c=0;
v=0;
for(int I=1;I<old_c.cols;I++){
if(indexToRow[I]<nsize){
dprintf(("I=%d from nonbasic\n",I));
int iterator_offset=indexToRow[I];
c(0,iterator_offset)+=old_c(0,I);
print_matrix(c);
}else{
dprintf(("I=%d from basic\n",I));
int iterator_offset=indexToRow[I]-nsize;
c-=old_c(0,I)*b.row(iterator_offset).colRange(0,b.cols-1);
v+=old_c(0,I)*b(iterator_offset,b.cols-1);
print_matrix(c);
}
}
dprintf(("after restore\n"));
print_simplex_state(c,b,v,N,B);
N.erase(N.begin());
//......... part of the code is omitted here .........
Example 15: svd
Mat_<double> estimateRotTransl(
Mat_<double> const worldPts,
Mat_<double> const imagePts)
{
assert(imagePts.cols == 2);
assert(worldPts.cols == 3);
assert(imagePts.rows == worldPts.rows);
// TODO verify all worldPts have z=0
// See "pose estimation" section in the paper.
// Set up linear system of equations.
int const n = imagePts.rows;
Mat_<double> F(2 * n, 9);
for(int i = 0; i < n; i++)
{
F(2 * i, 0) = worldPts(i, 0);
F(2 * i, 1) = 0;
F(2 * i, 2) = -worldPts(i, 0) * imagePts(i, 0);
F(2 * i, 3) = worldPts(i, 1);
F(2 * i, 4) = 0;
F(2 * i, 5) = -worldPts(i, 1) * imagePts(i, 0);
F(2 * i, 6) = 1;
F(2 * i, 7) = 0;
F(2 * i, 8) = -imagePts(i, 0);
F(2 * i + 1, 0) = 0;
F(2 * i + 1, 1) = worldPts(i, 0);
F(2 * i + 1, 2) = -worldPts(i, 0) * imagePts(i, 1);
F(2 * i + 1, 3) = 0;
F(2 * i + 1, 4) = worldPts(i, 1);
F(2 * i + 1, 5) = -worldPts(i, 1) * imagePts(i, 1);
F(2 * i + 1, 6) = 0;
F(2 * i + 1, 7) = 1;
F(2 * i + 1, 8) = -imagePts(i, 1);
}
// Find least-squares estimate of rotation + translation.
SVD svd(F);
Mat_<double> rrp = svd.vt.row(8);
rrp = rrp.clone().reshape(0, 3).t();
if(rrp(2, 2) < 0) {
rrp *= -1; // make sure depth is positive
}
// cout << "rrp: " << rrp << endl;
Mat_<double> transl = \
2 * rrp.col(2) / (norm(rrp.col(0)) + norm(rrp.col(1)));
// cout << "transl: " << transl << endl;
Mat_<double> rot = Mat_<double>::zeros(3, 3);
rrp.col(0).copyTo(rot.col(0));
rrp.col(1).copyTo(rot.col(1));
SVD svd2(rot);
rot = svd2.u * svd2.vt;
if(determinant(rot) < 0) {
rot.col(2) *= -1; // make sure it's a valid rotation matrix
}
if(abs(determinant(rot) - 1) > 1e-10) {
cerr << "Warning: rotation matrix has determinant " \
<< determinant(rot) << " where expected 1." << endl;
}
// cout << "rot: " << rot << endl;
Mat_<double> rotTransl(3, 4);
rot.col(0).copyTo(rotTransl.col(0));
rot.col(1).copyTo(rotTransl.col(1));
rot.col(2).copyTo(rotTransl.col(2));
transl.copyTo(rotTransl.col(3));
return rotTransl;
}
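Finally, a hedged calling sketch for Example 15; the six coplanar world points and their image coordinates below are made-up illustration data, and the estimateRotTransl definition above is assumed to be compiled in the same file. The world points must lie in the z = 0 plane, one point per row, with image points in matching row order; at least five correspondences are used here so that, with the default cv::SVD flags, vt has the nine rows that svd.vt.row(8) expects.

#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

// Declaration of the function defined in Example 15.
Mat_<double> estimateRotTransl(Mat_<double> const worldPts, Mat_<double> const imagePts);

int main() {
    // Six coplanar world points (z = 0), one per row (made-up data)
    Mat_<double> worldPts = (Mat_<double>(6, 3) <<
        -1, -1, 0,
         1, -1, 0,
         1,  1, 0,
        -1,  1, 0,
         0,  0, 0,
         2,  0, 0);
    // Their (made-up) image coordinates, same row order
    Mat_<double> imagePts = (Mat_<double>(6, 2) <<
        -0.11, -0.10,
         0.10, -0.11,
         0.09,  0.10,
        -0.10,  0.09,
         0.00,  0.01,
         0.21, -0.01);
    Mat_<double> rotTransl = estimateRotTransl(worldPts, imagePts);  // 3 x 4 [R | t]
    cout << "[R|t] =" << endl << rotTransl << endl;
    return 0;
}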