This article collects typical usage examples of the Mat_::copyTo method in C++. If you are struggling with questions such as how to use C++ Mat_::copyTo in practice, how to call it, or what real examples look like, the curated code samples below may help. You can also explore further usage examples of the containing class Mat_.
The following presents 15 code examples of Mat_::copyTo, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
Example 1: worldHomToCameraHom
Mat_<double> worldHomToCameraHom(
Mat_<double> const worldPtsHom,
Mat_<double> const rotMatrix,
Mat_<double> const translation)
{
assert(worldPtsHom.cols == 4);
assert(rotMatrix.rows == 3);
assert(rotMatrix.cols == 3);
assert(translation.rows == 3);
assert(translation.cols == 1);
// Convert rotMatrix + translation into a linear transformation in
// homogeneous coordinates.
Mat_<double> rigidMotion = Mat_<double>::zeros(4, 4);
rotMatrix.copyTo(rigidMotion(Range(0, 3), Range(0, 3)));
translation.copyTo(rigidMotion(Range(0, 3), Range(3, 4)));
rigidMotion(3, 3) = 1;
// cout << "rigidMotion: " << rigidMotion << endl;
// Assuming camera calibration matrix is identity.
// Note that OpenCV treats size as "[cols rows]", so matrix multiplication
// has to be done backwards (transpose everything).
Mat_<double> projection = Mat_<double>::eye(3, 4);
Mat result = projection * rigidMotion * worldPtsHom.t();
return result.t();
}
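For context, a minimal usage sketch of the function above, assuming it is visible in the same translation unit; the point values, identity rotation and unit Z-translation are made up for illustration and are not from the original project.
#include <opencv2/core.hpp>
#include <iostream>
using namespace cv;

int main() {
    // One homogeneous world point per row: (X, Y, Z, 1).
    Mat_<double> worldPtsHom = (Mat_<double>(2, 4) <<
        1.0, 2.0, 5.0, 1.0,
        0.0, 0.0, 3.0, 1.0);
    Mat_<double> R = Mat_<double>::eye(3, 3);                 // no rotation
    Mat_<double> t = (Mat_<double>(3, 1) << 0.0, 0.0, 1.0);   // shift along Z
    Mat_<double> camPts = worldHomToCameraHom(worldPtsHom, R, t);
    std::cout << camPts << std::endl;   // [1, 2, 6; 0, 0, 4]
    return 0;
}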
Example 2: minimize
double ConjGradSolverImpl::minimize(InputOutputArray x){
CV_Assert(_Function.empty()==false);
dprintf(("termcrit:\n\ttype: %d\n\tmaxCount: %d\n\tEPS: %g\n",_termcrit.type,_termcrit.maxCount,_termcrit.epsilon));
Mat x_mat=x.getMat();
CV_Assert(MIN(x_mat.rows,x_mat.cols)==1);
int ndim=MAX(x_mat.rows,x_mat.cols);
CV_Assert(x_mat.type()==CV_64FC1);
if(d.cols!=ndim){
d.create(1,ndim);
r.create(1,ndim);
r_old.create(1,ndim);
minimizeOnTheLine_buf1.create(1,ndim);
minimizeOnTheLine_buf2.create(1,ndim);
}
Mat_<double> proxy_x;
if(x_mat.rows>1){
buf_x.create(1,ndim);
Mat_<double> proxy(ndim,1,buf_x.ptr<double>());
x_mat.copyTo(proxy);
proxy_x=buf_x;
}else{
proxy_x=x_mat;
}
_Function->getGradient(proxy_x.ptr<double>(),d.ptr<double>());
d*=-1.0;
d.copyTo(r);
//everything is initialized at this point; check that it is all set up properly
dprintf(("proxy_x\n"));print_matrix(proxy_x);
dprintf(("d first time\n"));print_matrix(d);
dprintf(("r\n"));print_matrix(r);
for(int count=0;count<_termcrit.maxCount;count++){
minimizeOnTheLine(_Function,proxy_x,d,minimizeOnTheLine_buf1,minimizeOnTheLine_buf2);
r.copyTo(r_old);
_Function->getGradient(proxy_x.ptr<double>(),r.ptr<double>());
r*=-1.0;
double r_norm_sq=norm(r);
if(_termcrit.type==(TermCriteria::MAX_ITER+TermCriteria::EPS) && r_norm_sq<_termcrit.epsilon){
break;
}
r_norm_sq=r_norm_sq*r_norm_sq;
double beta=MAX(0.0,(r_norm_sq-r.dot(r_old))/r_norm_sq);
d=r+beta*d;
}
if(x_mat.rows>1){
Mat(ndim, 1, CV_64F, proxy_x.ptr<double>()).copyTo(x);
}
return _Function->calc(proxy_x.ptr<double>());
}
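This appears to be OpenCV's internal ConjGradSolverImpl. A minimal sketch of how the public interface is typically driven, assuming the OpenCV 3.x optim API (cv::ConjGradSolver::create and cv::MinProblemSolver::Function); the quadratic objective is made up for illustration.
#include <opencv2/core.hpp>
#include <opencv2/core/optim.hpp>
#include <iostream>
using namespace cv;

// f(x, y) = (x - 1)^2 + (y + 2)^2, minimum at (1, -2).
class Quadratic : public MinProblemSolver::Function {
public:
    int getDims() const { return 2; }
    double calc(const double* x) const {
        return (x[0] - 1) * (x[0] - 1) + (x[1] + 2) * (x[1] + 2);
    }
    void getGradient(const double* x, double* grad) {
        grad[0] = 2 * (x[0] - 1);
        grad[1] = 2 * (x[1] + 2);
    }
};

int main() {
    Ptr<ConjGradSolver> solver = ConjGradSolver::create(makePtr<Quadratic>());
    Mat x = (Mat_<double>(1, 2) << 0.0, 0.0);   // starting point
    double fmin = solver->minimize(x);          // x is updated in place (via copyTo internally)
    std::cout << "argmin = " << x << ", f = " << fmin << std::endl;
    return 0;
}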
Example 3: VO_CalcStatistics4AllProfiles
/**
* @author JIA Pei
* @version 2010-02-05
* @brief Calculate statistics for all profiles; compute all landmarks' mean profiles and covariance matrices
* @return void
*/
void VO_ASMNDProfiles::VO_CalcStatistics4AllProfiles()
{
// Calculate the inverse of Sg
for(unsigned int i = 0; i < this->m_iNbOfPyramidLevels; i++)
{
Mat_<float> allProfiles = Mat_<float>::zeros( this->m_iNbOfSamples, this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength() );
Mat_<float> Covar = Mat_<float>::zeros(this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength(),
this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength() );
Mat_<float> Mean = Mat_<float>::zeros(1, this->m_vvvNormalizedProfiles[0][i][0].GetProfileLength() );
for(unsigned int j = 0; j < this->m_iNbOfPoints; j++)
{
for(unsigned int k = 0; k < this->m_iNbOfProfileDim; k++)
{
for(unsigned int l = 0; l < this->m_iNbOfSamples; l++)
{
Mat_<float> tmpRow = allProfiles.row(l);
Mat_<float> tmp = this->m_vvvNormalizedProfiles[l][i][j].GetTheProfile().col(k).t();
tmp.copyTo(tmpRow);
}
// Now calculate the covariance matrix of the profiles for this landmark
cv::calcCovarMatrix( allProfiles, Covar, Mean, CV_COVAR_NORMAL+CV_COVAR_ROWS+CV_COVAR_SCALE, CV_32F);
// cv::calcCovarMatrix( allProfiles, Covar, Mean, CV_COVAR_SCRAMBLED+CV_COVAR_ROWS, CV_32F);
this->m_vvMeanNormalizedProfile[i][j].Set1DimProfile(Mean.t(), k);
// Explained by YAO Wei, 2008-1-29.
// The covariance matrix is positive semi-definite, but since C = X.t() * X it
// may be rank-deficient and therefore not invertible, so DECOMP_SVD is used
// below to obtain a (pseudo-)inverse.
cv::invert(Covar, this->m_vvvCVMInverseOfSg[i][j][k], DECOMP_SVD);
}
}
}
}
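As a side note, the covariance/pseudo-inverse step used above can be reproduced in isolation. A hedged sketch with made-up sample rows; the cv::COVAR_* names are the modern equivalents of the legacy CV_COVAR_* constants in the snippet.
#include <opencv2/core.hpp>
#include <iostream>
using namespace cv;

int main() {
    // Four samples (rows) of a 3-dimensional profile.
    Mat_<float> samples = (Mat_<float>(4, 3) <<
        1.0f, 2.0f, 0.0f,
        1.5f, 1.8f, 0.2f,
        0.9f, 2.2f, -0.1f,
        1.2f, 2.0f, 0.1f);
    Mat covar, mean;
    calcCovarMatrix(samples, covar, mean,
                    COVAR_NORMAL | COVAR_ROWS | COVAR_SCALE, CV_32F);
    Mat covarInv;
    invert(covar, covarInv, DECOMP_SVD);   // pseudo-inverse; covar may be rank-deficient
    std::cout << "mean = " << mean << std::endl;
    std::cout << "covar^+ = " << covarInv << std::endl;
    return 0;
}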
Example 4: cartToHom
Mat_<double> cartToHom(Mat_<double> const cart)
{
Mat_<double> hom(cart.rows, cart.cols + 1, 1.0);
Mat_<double> tmp = hom(Range::all(), Range(0, cart.cols));
cart.copyTo(tmp);
return hom;
}
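Note the pattern here: hom(Range::all(), Range(0, cart.cols)) is a view into the larger matrix, so copyTo writes through it into hom while the extra last column keeps its initial value of 1.0. A quick usage sketch (input values made up for illustration):
#include <opencv2/core.hpp>
#include <iostream>
using namespace cv;

int main() {
    Mat_<double> cart = (Mat_<double>(2, 3) <<
        1.0, 2.0, 3.0,
        4.0, 5.0, 6.0);
    Mat_<double> hom = cartToHom(cart);
    std::cout << hom << std::endl;
    // [1, 2, 3, 1;
    //  4, 5, 6, 1]
    return 0;
}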
Example 5: scannls
void NNLSOptimizer::scannls(const Mat& A, const Mat& b,Mat &x)
{
int iter = 0;
int m = A.size().height;
int n = A.size().width;
Mat_<double> AT = A.t();
double error = 1e-8;
Mat_<double> H = AT*A;
Mat_<double> f = -AT*b;
Mat_<double> x_old = Mat_<double>::zeros(n,1);
Mat_<double> x_new = Mat_<double>::zeros(n,1);
Mat_<double> mu_old = Mat_<double>::zeros(n,1);
Mat_<double> mu_new = Mat_<double>::zeros(n,1);
Mat_<double> r = Mat_<double>::zeros(n,1);
f.copyTo(mu_old);
while(iter < NNLS_MAX_ITER)
{
iter++;
for(int k=0;k<n;k++)
{
x_old.copyTo(x_new);
x_new(k,0) = std::max(0.0, x_old(k,0) - (mu_old(k,0)/H(k,k)) );
if(x_new(k,0) != x_old(k,0))
{
r = mu_old + (x_new(k,0) - x_old(k,0))*H.col(k);
r.copyTo(mu_new);
}
x_new.copyTo(x_old);
mu_new.copyTo(mu_old);
}
if(eKKT(H,f,x_new,error) == true)
{
break;
}
}
x_new.copyTo(x);
}
Example 6: Cluster
MatCluster::MatCluster(const cv::Mat_<unsigned char> &wholeOrPart,
int count,
const cv::Rect &boundsInWhole,
std::vector<cv::Point> deepest,
int smoothing, bool copy, bool matIsWhole)
: Cluster(count, boundsInWhole, deepest, smoothing)
{
Mat_<unsigned char> part = matIsWhole ? wholeOrPart(boundsInWhole) : wholeOrPart;
if (copy) part.copyTo(mat);
else mat = part;
}
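The copy flag above illustrates the central distinction this page is about: plain assignment of a Mat/Mat_ only copies the header and shares pixel data, while copyTo allocates an independent buffer. A small self-contained sketch (not from the original project):
#include <opencv2/core.hpp>
#include <iostream>
using namespace cv;

int main() {
    Mat_<unsigned char> whole(4, 4, (unsigned char)7);
    Mat_<unsigned char> shared = whole(Rect(0, 0, 2, 2));   // header only, shares data
    Mat_<unsigned char> deep;
    whole(Rect(0, 0, 2, 2)).copyTo(deep);                   // independent buffer

    whole(0, 0) = (unsigned char)99;
    std::cout << (int)shared(0, 0) << std::endl;   // 99: sees the change
    std::cout << (int)deep(0, 0) << std::endl;     // 7: unaffected
    return 0;
}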
Example 7: readMat
template<typename T> void readMat(cv::Mat_<T>& mat, char* file){
ifstream* infile = new ifstream(file,ifstream::in|ifstream::binary);
int rows = 0,cols = 0,type=0,size=0;
(*infile)>>rows;
(*infile)>>cols;
(*infile)>>type;
(*infile)>>size;
char* data = new char[size];
infile->read(data,size);
infile->close();
delete infile; // release the stream allocated with new above
int sizes[2] = {rows,cols};
Mat_<T> temp = Mat(2,sizes,type,data);
temp.copyTo(mat);
delete[] data;
}
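The helper above mixes formatted (>>) and raw binary reads; as an aside, when the custom binary layout is not required, cv::FileStorage offers a simpler way to persist a Mat. A hedged alternative sketch, not part of the original project:
#include <opencv2/core.hpp>
using namespace cv;

void writeMatYml(const Mat& m, const char* file) {
    FileStorage fs(file, FileStorage::WRITE);
    fs << "mat" << m;          // type, size and data are stored together
}

void readMatYml(Mat& m, const char* file) {
    FileStorage fs(file, FileStorage::READ);
    fs["mat"] >> m;            // allocates and fills m
}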
Example 8: copyMakeBorder
int copyMakeBorder(/*const*/ Mat_<_Tp, chs>& src, Mat_<_Tp, chs>& dst, int top, int bottom, int left, int right, int borderType, const Scalar& value = Scalar())
{
FBC_Assert(top >= 0 && bottom >= 0 && left >= 0 && right >= 0);
if (src.isSubmatrix() && (borderType & BORDER_ISOLATED) == 0) {
Size wholeSize;
Point ofs;
src.locateROI(wholeSize, ofs);
int dtop = std::min(ofs.y, top);
int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
int dleft = std::min(ofs.x, left);
int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
src.adjustROI(dtop, dbottom, dleft, dright);
top -= dtop;
left -= dleft;
bottom -= dbottom;
right -= dright;
}
if (dst.empty() || dst.rows != (src.rows + top + bottom) || dst.cols != (src.cols + left + right)) {
dst.release();
dst = Mat_<_Tp, chs>(src.rows + top + bottom, src.cols + left + right);
}
if (top == 0 && left == 0 && bottom == 0 && right == 0) {
if (src.data != dst.data || src.step != dst.step)
src.copyTo(dst);
return 0;
}
borderType &= ~BORDER_ISOLATED;
if (borderType != BORDER_CONSTANT) {
copyMakeBorder_8u(src.ptr(), src.step, src.size(), dst.ptr(), dst.step, dst.size(), top, left, src.elemSize(), borderType);
} else {
int cn = src.channels, cn1 = cn;
AutoBuffer<double> buf(cn);
scalarToRawData<_Tp, chs>(value, buf, cn);
copyMakeConstBorder_8u(src.ptr(), src.step, src.size(), dst.ptr(), dst.step, dst.size(), top, left, (int)src.elemSize(), (uchar*)(double*)buf);
}
return 0;
}
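This is a templated re-implementation (the FBC_Assert and Mat_<_Tp, chs> types suggest the fbc_cv library rather than stock OpenCV); in stock OpenCV the equivalent call is the cv::copyMakeBorder one-liner. A minimal usage sketch:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;

int main() {
    Mat src = Mat::ones(3, 3, CV_8UC1) * 5;
    Mat dst;
    // 1-pixel constant (zero) border on every side -> 5x5 result.
    copyMakeBorder(src, dst, 1, 1, 1, 1, BORDER_CONSTANT, Scalar(0));
    std::cout << dst << std::endl;
    return 0;
}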
Example 9: VO_CalcModifiedSDI
/**
* @author JIA Pei
* @version 2010-04-03
* @brief Calculate modified steepest descent image for template face - project out appearance variation
* @return void
*/
void VO_AAMInverseIA::VO_CalcModifiedSDI()
{
//project out appearance variation i.e. modify the steepest descent image
this->m_MatSteepestDescentImages = Mat_<float>::zeros(this->m_iNbOfTextures, this->m_iNbOfShapeEigens+4);
this->m_MatModifiedSteepestDescentImages = Mat_<float>::zeros(this->m_iNbOfTextures, this->m_iNbOfShapeEigens+4);
for (unsigned int i = 0; i < this->m_iNbOfTextures; i++)
{
// AAM Revisited (63)
for (unsigned int j = 0; j < 4; j++)
{
this->m_MatSteepestDescentImages(i, j) = this->m_MatSteepestDescentImages4GlobalShapeNormalization(i, j);
}
// AAM Revisited (64)
for (unsigned int j = 0; j < this->m_iNbOfShapeEigens; j++)
{
this->m_MatSteepestDescentImages(i, 4+j) = this->m_MatSteepestDescentImages4ShapeModel(i, j);
}
}
Mat_<float> oneCol = Mat_<float>::zeros(this->m_iNbOfTextures, 1);
Mat_<float> spanedsum = Mat_<float>::zeros(this->m_iNbOfTextures, 1);
Mat_<float> modifiedoneCol = Mat_<float>::zeros(this->m_iNbOfTextures, 1);
Mat_<float> oneSpanRowTranspose = Mat_<float>::zeros(this->m_iNbOfTextures, 1);
for (unsigned int i = 0; i < this->m_MatSteepestDescentImages.cols; i++)
{
spanedsum = Mat_<float>::zeros(this->m_iNbOfTextures, 1);
oneCol = this->m_MatSteepestDescentImages.col(i);
for (unsigned int j = 0; j < this->m_iNbOfTextureEigens; j++)
{
oneSpanRowTranspose = this->m_PCANormalizedTexture.eigenvectors.row(j).t();
double weight = oneSpanRowTranspose.dot(oneCol);
// dst(I)=src1(I)*alpha+src2(I)*beta+gamma
cv::addWeighted( spanedsum, 1.0, oneSpanRowTranspose, weight, 0.0, spanedsum );
}
cv::subtract(oneCol, spanedsum, modifiedoneCol);
Mat_<float> tmpCol = this->m_MatModifiedSteepestDescentImages.col(i);
modifiedoneCol.copyTo(tmpCol);
}
}
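The inner loop above removes from each steepest-descent column its component inside the span of the texture eigenvectors. The same projection can be written more compactly with matrix products; a hedged sketch of the idea, assuming E holds unit-norm eigenvectors as rows (as in PCA::eigenvectors):
#include <opencv2/core.hpp>
#include <iostream>
using namespace cv;

int main() {
    // Two orthonormal basis vectors (rows) in R^3.
    Mat_<float> E = (Mat_<float>(2, 3) <<
        1.f, 0.f, 0.f,
        0.f, 1.f, 0.f);
    Mat_<float> v = (Mat_<float>(3, 1) << 2.f, 3.f, 4.f);
    // The component of v inside span(E) is E.t() * (E * v); subtracting it
    // leaves only the part orthogonal to every eigenvector, which is what the
    // addWeighted/subtract loop above computes column by column.
    Mat_<float> projectedOut = v - E.t() * (E * v);
    std::cout << projectedOut << std::endl;   // [0; 0; 4]
    return 0;
}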
Example 10: firstDer
Mat_<float> HistogramFeatures::derivateHistogram(Mat_<float> &Histogram){
float tmp1, tmp2;
Mat_<float> firstDer;
Histogram.copyTo(firstDer);
firstDer = cv::Mat::zeros(Histogram.rows,Histogram.cols,Histogram.type());
firstDer(0,0) = Histogram(0,1) - Histogram(0,0);
// central differences for the interior bins; stop at cols-2 so i+1 stays in range
for(int i = 1; i < Histogram.cols - 1; ++i){
tmp1 = Histogram(0,i) - Histogram(0,i-1);
tmp2 = Histogram(0,i+1) - Histogram(0,i);
firstDer(0,i) = (tmp2 + tmp1)/2;
}
firstDer(0,Histogram.cols-1) = Histogram(0,Histogram.cols-1) - Histogram(0,Histogram.cols-2);
return firstDer;
// int i;
// float tmp1, tmp2;
// //compute derivative first index
// firstderv[0] = data[1]-data[0];
// for(i=1; i<nx-2; i++) //compute first derivative
// {
// tmp1 = data[i]-data[i-1];
// tmp2 = data[i+1]-data[i];
// firstderv[i] = (tmp2+tmp1)/2;
// }
// //compute derivative last index
// firstderv[nx-1] = data[nx-1]-data[nx-2];
}
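For what it is worth, the same derivative scheme can be expressed with cv::filter2D and a [-0.5, 0, 0.5] kernel; a hedged alternative sketch with a made-up histogram. Interior bins match (h[i+1] - h[i-1]) / 2 exactly; with BORDER_REPLICATE the two end values come out as half of the one-sided differences used above, so they would need patching up if exact parity is required.
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
using namespace cv;

int main() {
    Mat_<float> hist = (Mat_<float>(1, 5) << 0.f, 1.f, 4.f, 9.f, 16.f);
    // Central-difference kernel (filter2D performs correlation, not convolution).
    Mat_<float> kernel = (Mat_<float>(1, 3) << -0.5f, 0.f, 0.5f);
    Mat_<float> der;
    filter2D(hist, der, CV_32F, kernel, Point(-1, -1), 0, BORDER_REPLICATE);
    std::cout << der << std::endl;   // interior: 2, 4, 6
    return 0;
}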
Example 11: computeSplineCoeffs
void CThinPlateSpline::computeSplineCoeffs(std::vector<Point>& iPIn, std::vector<Point>& iiPIn, float lambda,const TPS_INTERPOLATION tpsInter)
{
std::vector<Point>* iP = NULL;
std::vector<Point>* iiP = NULL;
if(tpsInter == FORWARD_WARP)
{
iP = &iPIn;
iiP = &iiPIn;
// keep information which coefficients are set
FLAG_COEFFS_BACK_WARP_SET = true;
FLAG_COEFFS_FORWARD_WARP_SET = false;
}
else if(tpsInter == BACK_WARP)
{
iP = &iiPIn;
iiP = &iPIn;
// keep information which coefficients are set
FLAG_COEFFS_BACK_WARP_SET = false;
FLAG_COEFFS_FORWARD_WARP_SET = true;
}
//get number of corresponding points
int dim = 2;
int n = iP->size();
//Initialize mathematical datastructures
Mat_<float> V(dim,n+dim+1,0.0);
Mat_<float> P(n,dim+1,1.0);
Mat_<float> K = Mat_<float>::eye(n,n)*lambda;
Mat_<float> L(n+dim+1,n+dim+1,0.0);
// fill up the K and P matrices
std::vector<Point>::iterator itY;
std::vector<Point>::iterator itX;
int y = 0;
for (itY = iP->begin(); itY != iP->end(); ++itY, y++) {
int x = 0;
for (itX = iP->begin(); itX != iP->end(); ++itX, x++) {
if (x != y) {
K(y, x) = (float)fktU(*itY, *itX);
}
}
P(y,1) = (float)itY->y;
P(y,2) = (float)itY->x;
}
Mat Pt;
transpose(P,Pt);
// insert K into L
Rect range = Rect(0, 0, n, n);
Mat Lr(L,range);
K.copyTo(Lr);
// insert P into L
range = Rect(n, 0, dim + 1, n);
Lr = Mat(L,range);
P.copyTo(Lr);
// insert Pt into L
range = Rect(0,n,n,dim+1);
Lr = Mat(L,range);
Pt.copyTo(Lr);
// fill V array
std::vector<Point>::iterator it;
int u = 0;
for(it = iiP->begin(); it != iiP->end(); ++it)
{
V(0,u) = (float)it->y;
V(1,u) = (float)it->x;
u++;
}
// transpose V
Mat Vt;
transpose(V,Vt);
cMatrix = Mat_<float>(n+dim+1,dim,0.0);
// invert L
Mat invL;
invert(L,invL,DECOMP_LU);
//multiply(invL,Vt,cMatrix);
cMatrix = invL * Vt;
//compensate for rounding errors
for(int row = 0; row < cMatrix.rows; row++)
{
for(int col = 0; col < cMatrix.cols; col++)
{
double v = cMatrix(row,col);
//......... part of the code is omitted here .........
Example 12: init_label_super
void spm_bp::init_label_super(Mat_<Vec2f>& label_super_k, Mat_<float>& dCost_super_k) //, vector<vector<Vec2f> > &label_saved, vector<vector<Mat_<float> > > &dcost_saved)
{
printf("==================================================\n");
printf("Initiating particles...Done!\n");
vector<Vec2f> label_vec;
Mat_<float> localDataCost;
for (int sp = 0; sp < numOfSP1; ++sp) {
int id = repPixels1[sp];
int y = subRange1[id][0];
int x = subRange1[id][1];
int h = subRange1[id][3] - subRange1[id][1] + 1;
int w = subRange1[id][2] - subRange1[id][0] + 1;
label_vec.clear();
int k = 0;
while (k < NUM_TOP_K) {
float du = (float(rand()) / RAND_MAX - 0.5) * 2 * (float)disp_range_u;
float dv = (float(rand()) / RAND_MAX - 0.5) * 2 * (float)disp_range_v;
du = floor(du * upScale + 0.5) / upScale;
dv = floor(dv * upScale + 0.5) / upScale;
if (du >= -disp_range_u && du <= disp_range_u && dv >= -disp_range_v && dv <= disp_range_v) {
int index_tp = 1;
for (int k1 = 0; k1 < k; ++k1) {
if (checkEqual_PMF_PMBP(label_super_k[repPixels1[sp]][k1], Vec2f(du, dv)))
index_tp = 0;
}
if (index_tp == 1) {
for (int ii = 0; ii < superpixelsList1[sp].size(); ++ii)
label_super_k[superpixelsList1[sp][ii]][k] = Vec2f(du, dv);
label_vec.push_back(Vec2f(du, dv));
++k;
}
}
}
#if USE_CLMF0_TO_AGGREGATE_COST
cv::Mat_<cv::Vec4b> leftCombinedCrossMap;
leftCombinedCrossMap.create(h, w);
subCrossMap1[sp].copyTo(leftCombinedCrossMap);
CFFilter cff;
#endif
int vec_size = label_vec.size();
localDataCost.create(h, w * vec_size);
localDataCost.setTo(0);
#pragma omp parallel for num_threads(NTHREADS)
for (int i = 0; i < vec_size; ++i) {
int kx = i * w;
Mat_<float> rawCost;
getLocalDataCostPerlabel(sp, label_vec[i], rawCost);
#if USE_CLMF0_TO_AGGREGATE_COST
cff.FastCLMF0FloatFilterPointer(leftCombinedCrossMap, rawCost, rawCost);
#endif
rawCost.copyTo(localDataCost(cv::Rect(kx, 0, w, h)));
}
//getLocalDataCost( sp, label_vec, localDataCost);
int pt, px, py, kx;
for (int ii = 0; ii < superpixelsList1[sp].size(); ++ii) {
//cout<<ii<<endl;
pt = superpixelsList1[sp][ii];
px = pt / width1;
py = pt % width1;
for (int kk = 0; kk < NUM_TOP_K; kk++) {
kx = kk * w;
const Mat_<float>& local = localDataCost(cv::Rect(kx, 0, w, h));
dCost_super_k[pt][kk] = local[px - x][py - y];
}
}
}
printf("==================================================\n");
}
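The per-label cost strips above follow a common layout: one wide buffer of size h x (w * K), where the slice for label i lives at column offset i*w and is filled through a Rect view, so copyTo writes straight into the shared buffer. A stripped-down sketch of just that layout (values made up for illustration):
#include <opencv2/core.hpp>
#include <iostream>
using namespace cv;

int main() {
    const int h = 2, w = 3, numLabels = 4;
    Mat_<float> localDataCost(h, w * numLabels, 0.f);
    for (int i = 0; i < numLabels; ++i) {
        Mat_<float> rawCost(h, w, (float)i);   // stand-in for the per-label cost
        // Same size as the ROI, so copyTo writes through the view into the buffer.
        rawCost.copyTo(localDataCost(Rect(i * w, 0, w, h)));
    }
    // Reading slice i back is the inverse view:
    Mat_<float> slice2 = localDataCost(Rect(2 * w, 0, w, h));
    std::cout << slice2 << std::endl;          // all 2s
    return 0;
}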
Example 13: runspm_bp
//......... part of the code is omitted here .........
vec_label_nei.push_back(test_label);
}
}
repPixels1[sp] = superpixelsList1[sp][rand() % superpixelsList1[sp].size()];
for (int k = 0; k < NUM_TOP_K; k++) {
Vec2f test_label = label_k[repPixels1[sp]][k];
float mag = min(disp_range_u, disp_range_v) / 8;
for (; mag >= (1.0 / upScale); mag /= 2.0) {
Vec2f test_label_random;
float tmpVerLabel = test_label[0] + ((float(rand()) / RAND_MAX) - 0.5) * 2.0 * mag;
float tmpHorLabel = test_label[1] + ((float(rand()) / RAND_MAX) - 0.5) * 2.0 * mag;
test_label_random[0] = floor(tmpVerLabel * upScale + 0.5) / upScale;
test_label_random[1] = floor(tmpHorLabel * upScale + 0.5) / upScale;
if (test_label_random[0] >= -disp_range_u && test_label_random[0] <= disp_range_u
&& test_label_random[1] >= -disp_range_v && test_label_random[1] <= disp_range_v
&& isNewLabel_PMF_PMBP(vec_label_nei, test_label_random) == -1) //here
{
vec_label_nei.push_back(test_label_random);
}
}
}
const int vec_size = vec_label_nei.size();
// DataCost_nei.release();
DataCost_nei.create(h, w * vec_size);
DataCost_nei.setTo(0);
#if USE_CLMF0_TO_AGGREGATE_COST
cv::Mat_<cv::Vec4b> leftCombinedCrossMap;
leftCombinedCrossMap.create(h, w);
subCrossMap1[sp].copyTo(leftCombinedCrossMap);
CFFilter cff;
#endif
// Compute matching costs for each candidate particles
// printf("Computing matching costs start\n");
#pragma omp parallel for num_threads(NTHREADS)
for (int i = 0; i < vec_size; ++i) {
int kx = i * w;
Mat_<float> rawCost;
getLocalDataCostPerlabel(sp, vec_label_nei[i], rawCost);
#if USE_CLMF0_TO_AGGREGATE_COST
cff.FastCLMF0FloatFilterPointer(leftCombinedCrossMap, rawCost, rawCost);
#endif
rawCost.copyTo(DataCost_nei(cv::Rect(kx, 0, w, h)));
}
//getLocalDataCost(sp, vec_label_nei, DataCost_nei);
// printf("Computing matching costs end\n");
Vec4i curRange_s = spRange1[curSPP];
// int spw = curRange_s[2] - curRange_s[0] + 1;
// int sph = curRange_s[3] - curRange_s[1] + 1;
int spy_s, spy_e, spx_s, spx_e, spx_step, spy_step;
spy_s = curRange_s[0];
spy_e = curRange_s[2] + 1;
spx_s = curRange_s[1];
spx_e = curRange_s[3] + 1;
spx_step = 1;
spy_step = 1;
if (iter % 4 == 1) {
Example 14: predict
int MultipleGraphsClassifier::predict(DisjointSetForest &segmentation, const Mat_<Vec3b> &image, const Mat_<float> &mask) {
int maxGraphSize = max(segmentation.getNumberOfComponents(), this->maxTrainingGraphSize);
int minGraphSize = min(segmentation.getNumberOfComponents(), this->minTrainingGraphSize);
if (minGraphSize <= this->k) {
cout<<"the smallest graph has size "<<minGraphSize<<", cannot compute "<<this->k<<" eigenvectors"<<endl;
exit(EXIT_FAILURE);
}
// compute each feature graph for the test sample
vector<WeightedGraph> featureGraphs;
featureGraphs.reserve(this->features.size());
for (int i = 0; i < (int)this->features.size(); i++) {
featureGraphs.push_back(this->computeFeatureGraph(i, segmentation, image, mask));
}
// order graphs by feature, to compute pattern vectors feature by
// feature.
vector<vector<WeightedGraph> > graphsByFeatures(this->features.size());
// add feature graphs for the test sample at index 0
for (int i = 0; i < (int)this->features.size(); i++) {
graphsByFeatures[i].reserve(this->trainingFeatureGraphs.size() + 1);
graphsByFeatures[i].push_back(featureGraphs[i]);
}
// add feature graphs for each training sample
for (int i = 0; i < (int)this->trainingFeatureGraphs.size(); i++) {
for (int j = 0; j < (int)this->features.size(); j++) {
graphsByFeatures[j].push_back(get<0>(this->trainingFeatureGraphs[i])[j]);
}
}
// compute the corresponding pattern vectors
vector<vector<VectorXd> > patternsByFeatures(this->features.size());
for (int i = 0; i < (int)this->features.size(); i++) {
patternsByFeatures[i] = patternVectors(graphsByFeatures[i], this->k, maxGraphSize);
}
// concatenate pattern vectors by image, converting to opencv format
// to work with cv's ML module.
Mat_<float> longPatterns = Mat_<float>::zeros(this->trainingFeatureGraphs.size() + 1, maxGraphSize * k * this->features.size());
for (int i = 0; i < (int)this->trainingFeatureGraphs.size() + 1; i++) {
VectorXd longPattern(maxGraphSize * k * this->features.size());
for (int j = 0; j < this->features.size(); j++) {
longPattern.block(j * maxGraphSize * k, 0, maxGraphSize * k, 1) = patternsByFeatures[j][i];
}
Mat_<double> cvLongPattern;
eigenToCv(longPattern, cvLongPattern);
Mat_<float> castLongPattern = Mat_<float>(cvLongPattern);
castLongPattern.copyTo(longPatterns.row(i));
}
// classification of long patterns using k-nearest neighbours (CvKNearest, despite the variable name)
CvKNearest svmClassifier;
Mat_<int> classes(this->trainingFeatureGraphs.size(), 1);
for (int i = 0; i < (int)this->trainingFeatureGraphs.size(); i++) {
classes(i,0) = get<1>(this->trainingFeatureGraphs[i]);
}
svmClassifier.train(longPatterns.rowRange(1, longPatterns.rows), classes);
return (int)svmClassifier.find_nearest(longPatterns.row(0), 1);
}
Developer ID: alexisVallet, Project: animation-character-identification, Lines of code: 73, Source file: MultipleGraphsClassifier.cpp
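Note that the classifier used here is CvKNearest from the legacy OpenCV 2.x ML API. In OpenCV 3 and later, a hedged equivalent would look roughly like the following (cv::ml::KNearest; the training data shapes and values are made up for illustration):
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
#include <iostream>
using namespace cv;

int main() {
    // Three 4-dimensional training patterns with integer class labels.
    Mat_<float> samples = (Mat_<float>(3, 4) <<
        0.f, 0.f, 0.f, 0.f,
        1.f, 1.f, 1.f, 1.f,
        5.f, 5.f, 5.f, 5.f);
    Mat_<int> labels = (Mat_<int>(3, 1) << 0, 0, 1);

    Ptr<ml::KNearest> knn = ml::KNearest::create();
    knn->train(samples, ml::ROW_SAMPLE, labels);

    Mat_<float> query = (Mat_<float>(1, 4) << 4.f, 4.f, 4.f, 4.f);
    Mat results;
    knn->findNearest(query, 1, results);   // 1-NN, as in the snippet above
    std::cout << results << std::endl;     // expected label: 1
    return 0;
}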
Example 15: readDipImage
/*!
* @brief Import DIP Image from given path
* @param impath : path to the image
* @param[out] I : output image
*/
template<typename T> void readDipImage(const char * impath, DipImage<T> & I)
{
Mat_<T> J = imread(impath, 0);
J.copyTo(I);
};
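One caveat worth a hedged sketch: imread(impath, 0) always yields an 8-bit single-channel Mat, so when T is not unsigned char the Mat_<T> assignment relies on OpenCV's implicit conversion; being explicit with convertTo avoids surprises. The helper below is hypothetical and only illustrates the idea; DipImage from the snippet is assumed to accept a Mat_<T> via copyTo as shown above.
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
using namespace cv;

template<typename T> void readGray(const char* impath, Mat_<T>& out) {
    Mat raw = imread(impath, IMREAD_GRAYSCALE);    // always CV_8UC1, or empty on failure
    CV_Assert(!raw.empty());
    Mat converted;
    raw.convertTo(converted, DataType<T>::depth);  // e.g. CV_32F when T == float
    converted.copyTo(out);                         // out owns an independent buffer
}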