This article collects typical usage examples of the C++ AAM_Shape class. If you are wondering how the AAM_Shape class is used in C++, or are looking for concrete examples of it in real code, the selected class examples below may help.
A total of 15 code examples of the AAM_Shape class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
Example 1: InitShape
AAM_Shape AAMBody::InitShape(const AAM_Shape& pMeanShape)
{
    cv::Size imgSize = imageMessageIn_->GetSize();
    AAM_Shape detShape;
    AAM_Shape startShape;
    detShape.resize(2);
    detShape[0].x = param_->boundingBox.x < 0 ? 0 : param_->boundingBox.x;
    detShape[0].y = param_->boundingBox.y < 0 ? 0 : param_->boundingBox.y;
    if(detShape[0].x > imgSize.width)
        detShape[0].x = imgSize.width - param_->boundingBox.width;
    if(detShape[0].y > imgSize.height)
        detShape[0].y = imgSize.height - param_->boundingBox.height;
    detShape[1].x = detShape[0].x + param_->boundingBox.width;
    detShape[1].y = detShape[0].y + param_->boundingBox.height;
    if(detShape[1].x > imgSize.width)
        detShape[1].x = imgSize.width - param_->boundingBox.width;
    if(detShape[1].y > imgSize.height)
        detShape[1].y = imgSize.height - param_->boundingBox.height;
    AdjustShape(detShape);
    AlignShape(startShape, detShape, pMeanShape);
    return startShape;
}
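The clamping above can be expressed more compactly. Below is a minimal, self-contained sketch (plain C++, not part of the original class) of the same idea: keeping a w-by-h box inside an imgW-by-imgH image.

#include <algorithm>

// Clamp a w-by-h box so that its top-left corner (x, y) keeps the whole
// box inside an imgW-by-imgH image.
static void ClampBox(double& x, double& y, double w, double h,
                     double imgW, double imgH)
{
    x = std::max(0.0, std::min(x, imgW - w));
    y = std::max(0.0, std::min(y, imgH - h));
}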
Example 2: InitShape
void AAMFit::Fit( IplImage* pFrame, CvRect *pR, int pType )
{
    if( _model == NULL ) return;
    AAM_Shape currentShape = InitShape( _model->GetMeanShape(), pR, pType );
    int iter = _model->Fit(pFrame, currentShape, 20, false); // TODO: a small memory leak somewhere - not always
    _imagePoints = currentShape.getPoints();
}
Example 3: ShapeAAMFromASM
static AAM_Shape ShapeAAMFromASM(const asm_shape& shape)
{
    AAM_Shape s;
    s.resize(shape.NPoints());
    for(int i = 0; i < shape.NPoints(); i++)
    {
        s[i].x = shape[i].x;
        s[i].y = shape[i].y;
    }
    return s;
}
Example 4: FaceSynthesis
void FacePredict::FaceSynthesis(AAM_Shape &shape, CvMat* texture, IplImage* newImage)
{
double thisfacewidth = shape.GetWidth();
shape.Scale(stdwidth / thisfacewidth);
shape.Translate(-shape.MinX(), -shape.MinY());
AAM_PAW paw;
CvMat* points = cvCreateMat (1, __shape.nPoints(), CV_32FC2);
CvMemStorage* storage = cvCreateMemStorage(0);
paw.Train(shape, points, storage, __paw.GetTri(), false); //the actual shape
__AAMRefShape.Translate(-__AAMRefShape.MinX(), -__AAMRefShape.MinY()); // reference shape: translate so that its minimum corner is at (0,0)
double minV, maxV;
cvMinMaxLoc(texture, &minV, &maxV);
cvConvertScale(texture, texture, 1/(maxV-minV)*255, -minV*255/(maxV-minV));
cvZero(newImage);
int x1, x2, y1, y2, idx1 = 0, idx2 = 0;
int tri_idx, v1, v2, v3;
int minx, miny, maxx, maxy;
minx = shape.MinX(); miny = shape.MinY();
maxx = shape.MaxX(); maxy = shape.MaxY();
for(int y = miny; y < maxy; y++)
{
y1 = y-miny;
for(int x = minx; x < maxx; x++)
{
x1 = x-minx;
idx1 = paw.Rect(y1, x1);
if(idx1 >= 0)
{
tri_idx = paw.PixTri(idx1);
v1 = paw.Tri(tri_idx, 0);
v2 = paw.Tri(tri_idx, 1);
v3 = paw.Tri(tri_idx, 2);
x2 = paw.Alpha(idx1)*__AAMRefShape[v1].x + paw.Belta(idx1)*__AAMRefShape[v2].x +
paw.Gamma(idx1)*__AAMRefShape[v3].x;
y2 = paw.Alpha(idx1)*__AAMRefShape[v1].y + paw.Belta(idx1)*__AAMRefShape[v2].y +
paw.Gamma(idx1)*__AAMRefShape[v3].y;
idx2 = __paw.Rect(y2, x2);
if(idx2 < 0) continue;
CV_IMAGE_ELEM(newImage, byte, y, 3*x) = cvmGet(texture, 0, 3*idx2);
CV_IMAGE_ELEM(newImage, byte, y, 3*x+1) = cvmGet(texture, 0, 3*idx2+1);
CV_IMAGE_ELEM(newImage, byte, y, 3*x+2) = cvmGet(texture, 0, 3*idx2+2);
}
}
}
cvReleaseMat(&points);
cvReleaseMemStorage(&storage);
}
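Both this example and Example 6 below map each destination pixel back into the reference frame through the barycentric coordinates returned by Alpha, Belta and Gamma. A self-contained sketch of that interpolation step (plain C++, independent of the AAM classes):

#include <cstdio>

struct Pt { double x, y; };

// Interpolate a point inside the triangle (v1, v2, v3) from barycentric
// coordinates (alpha, beta, gamma), where alpha + beta + gamma == 1.
static Pt FromBarycentric(const Pt& v1, const Pt& v2, const Pt& v3,
                          double alpha, double beta, double gamma)
{
    Pt p;
    p.x = alpha * v1.x + beta * v2.x + gamma * v3.x;
    p.y = alpha * v1.y + beta * v2.y + gamma * v3.y;
    return p;
}

int main()
{
    Pt a = {0, 0}, b = {9, 0}, c = {0, 9};
    Pt p = FromBarycentric(a, b, c, 1.0/3, 1.0/3, 1.0/3);  // centroid
    printf("(%.0f, %.0f)\n", p.x, p.y);                    // prints (3, 3)
    return 0;
}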
Example 5: CalcMeanShape
//============================================================================
void AAM_PDM::CalcMeanShape(AAM_Shape &MeanShape,
                            const std::vector<AAM_Shape> &AllShapes)
{
    MeanShape.resize(AllShapes[0].NPoints());
    MeanShape = 0;
    for(int i = 0; i < (int)AllShapes.size(); i++)    MeanShape += AllShapes[i];
    MeanShape /= AllShapes.size();
}
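A hedged driver sketch for the routine above. The header names, the annotation file names, and the assumption that CalcMeanShape is callable from this context are placeholders; only the reading pattern (ReadAnnotations, as in Examples 7, 10 and 11) and the CalcMeanShape signature come from the listing.

#include <vector>
#include "AAM_Shape.h"   // assumed header names
#include "AAM_PDM.h"

// Sketch only: read a few annotation files and average them.
void BuildMeanShape(AAM_PDM& pdm, AAM_Shape& MeanShape)
{
    std::vector<AAM_Shape> AllShapes;
    const char* files[] = { "face-001.pts", "face-002.pts", "face-003.pts" };
    for(int i = 0; i < 3; i++)
    {
        AAM_Shape s;
        s.ReadAnnotations(files[i]);   // absolute coordinates assumed; see Example 7 for the rescale case
        AllShapes.push_back(s);
    }
    pdm.CalcMeanShape(MeanShape, AllShapes);   // averages via the overloaded =, += and /= operators
}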
Example 6: DrawAppearance
//============================================================================
void AAM_CAM::DrawAppearance(IplImage* image, const AAM_Shape& Shape, CvMat* Texture)
{
AAM_PAW paw;
int x1, x2, y1, y2, idx1 = 0, idx2 = 0;
int tri_idx, v1, v2, v3;
int minx, miny, maxx, maxy;
paw.Train(Shape, __Points, __Storage, __paw.GetTri(), false);
AAM_Shape refShape = __paw.__referenceshape;
double minV, maxV;
cvMinMaxLoc(Texture, &minV, &maxV);
cvConvertScale(Texture, Texture, 1/(maxV-minV)*255, -minV*255/(maxV-minV));
minx = Shape.MinX(); miny = Shape.MinY();
maxx = Shape.MaxX(); maxy = Shape.MaxY();
for(int y = miny; y < maxy; y++)
{
y1 = y-miny;
for(int x = minx; x < maxx; x++)
{
x1 = x-minx;
idx1 = paw.Rect(y1, x1);
if(idx1 >= 0)
{
tri_idx = paw.PixTri(idx1);
v1 = paw.Tri(tri_idx, 0);
v2 = paw.Tri(tri_idx, 1);
v3 = paw.Tri(tri_idx, 2);
x2 = paw.Alpha(idx1)*refShape[v1].x + paw.Belta(idx1)*refShape[v2].x +
paw.Gamma(idx1)*refShape[v3].x;
y2 = paw.Alpha(idx1)*refShape[v1].y + paw.Belta(idx1)*refShape[v2].y +
paw.Gamma(idx1)*refShape[v3].y;
idx2 = __paw.Rect(y2, x2);
if(idx2 < 0) continue;
CV_IMAGE_ELEM(image, byte, y, 3*x) = cvmGet(Texture, 0, 3*idx2);
CV_IMAGE_ELEM(image, byte, y, 3*x+1) = cvmGet(Texture, 0, 3*idx2+1);
CV_IMAGE_ELEM(image, byte, y, 3*x+2) = cvmGet(Texture, 0, 3*idx2+2);
}
}
}
}
Example 7: cvCreateMat
//============================================================================
void AAM_TDM::Train(const file_lists& pts_files, const file_lists& img_files,
const AAM_PAW& m_warp,
double texture_percentage /* = 0.975 */,
bool registration /* = true */)
{
int nPoints = m_warp.nPoints();
int nPixels = m_warp.nPix()*3;
int nSamples = pts_files.size();
CvMat *AllTextures = cvCreateMat(nSamples, nPixels, CV_64FC1);
CvMat * matshape = cvCreateMat(1, nPoints*2, CV_64FC1);
for(int i = 0; i < nSamples; i++)
{
IplImage* image = cvLoadImage(img_files[i].c_str(), -1);
AAM_Shape trueshape;
if(!trueshape.ReadAnnotations(pts_files[i]))
trueshape.ScaleXY(image->width, image->height);
trueshape.Point2Mat(matshape);
AAM_Common::CheckShape(matshape, image->width, image->height);
CvMat t; cvGetRow(AllTextures, &t, i);
m_warp.CalcWarpTexture(matshape, image, &t);
cvReleaseImage(&image);
}
cvReleaseMat(&matshape);
// align texture so as to minimize the lighting variation
AAM_TDM::AlignTextures(AllTextures);
//now do pca
DoPCA(AllTextures, texture_percentage);
if(registration) SaveSeriesTemplate(AllTextures, m_warp);
cvReleaseMat(&AllTextures);
}
Example 8: AlignShape
void AAMBody::AlignShape(AAM_Shape& pStartShape, const AAM_Shape& pDetShape, const AAM_Shape& pMeanShape)
{
    AAM_Shape baseShape, alignedShape;
    baseShape.resize(2);
    alignedShape.resize(2);
    double meanCenter = (pMeanShape.MinY() + pMeanShape.MaxY()) * 0.5;
    baseShape[0].x = pMeanShape.MinX();
    baseShape[0].y = meanCenter;
    baseShape[1].x = pMeanShape.MaxX();
    baseShape[1].y = meanCenter;
    double yMean = (pDetShape[1].y + pDetShape[0].y) * 0.5;
    alignedShape[0].x = pDetShape[0].x;
    alignedShape[0].y = yMean;
    alignedShape[1].x = pDetShape[1].x;
    alignedShape[1].y = yMean;
    double a, b, tx, ty;
    baseShape.AlignTransformation(alignedShape, a, b, tx, ty);
    pStartShape = pMeanShape;
    pStartShape.TransformPose(a, b, tx, ty);
}
Example 9: Fit
//============================================================================
void AAM_IC::Fit(const IplImage* image, AAM_Shape& Shape,
int max_iter /* = 30 */, bool showprocess /* = false */)
{
//initialize some stuff
double t = gettime;
const CvMat* A0 = __texture.GetMean();
CvMat p; cvGetCols(__search_pq, &p, 4, 4+__shape.nModes());
Shape.Point2Mat(__current_s);
SetAllParamsZero();
__shape.CalcParams(__current_s, __search_pq);
IplImage* Drawimg = 0;
for(int iter = 0; iter < max_iter; iter++)
{
if(showprocess)
{
if(Drawimg == 0) Drawimg = cvCloneImage(image);
else cvCopy(image, Drawimg);
Shape.Mat2Point(__current_s);
Draw(Drawimg, Shape, 2);
mkdir("result");
char filename[100];
sprintf(filename, "result/Iter-%02d.jpg", iter);
cvSaveImage(filename, Drawimg);
}
//check the current shape
AAM_Common::CheckShape(__current_s, image->width, image->height);
//warp the image to the mean shape mesh
__paw.CalcWarpTexture(__current_s, image, __warp_t);
AAM_TDM::NormalizeTexture(A0, __warp_t);
cvSub(__warp_t, A0, __error_t);
//calculate updates (and scale to account for linear lighting gain)
cvGEMM(__error_t, __G, 1, NULL, 1, __delta_pq, CV_GEMM_B_T);
//check for parameter convergence
if(cvNorm(__delta_pq) < 1e-6) break;
//apply inverse compositional algorithm to update parameters
InverseCompose(__delta_pq, __current_s, __update_s);
//smooth shape
cvAddWeighted(__current_s, 0.4, __update_s, 0.6, 0, __update_s);
//update parameters
__shape.CalcParams(__update_s, __search_pq);
//calculate constrained new shape
__shape.CalcShape(__search_pq, __update_s);
//check for shape convergence
if(cvNorm(__current_s, __update_s, CV_L2) < 0.001) break;
else cvCopy(__update_s, __current_s);
}
Shape.Mat2Point(__current_s);
t = gettime-t;
printf("AAM IC Fitting time cost %.3f millisec\n", t);
cvReleaseImage(&Drawimg);
}
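A hedged usage sketch for the fit routine above. Only the Fit signature and the ReadAnnotations/ScaleXY pattern come from the listing; the header name, file names, and the idea of seeding the search from a .pts file are assumptions.

#include "AAM_IC.h"   // assumed header name

// Sketch only: run the inverse compositional search on one image,
// assuming 'aam' has already been trained (see Example 10).
void RunFit(AAM_IC& aam)
{
    IplImage* frame = cvLoadImage("face.jpg", -1);
    AAM_Shape shape;
    if(!shape.ReadAnnotations("face-init.pts"))        // rough initial landmarks
        shape.ScaleXY(frame->width, frame->height);    // relative coordinates, as in Example 10
    aam.Fit(frame, shape, 30, true);                   // saves result/Iter-XX.jpg while iterating
    cvReleaseImage(&frame);
}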
Example 10: fprintf
//============================================================================
void AAM_IC::Train(const file_lists& pts_files,
const file_lists& img_files,
double scale /* = 1.0 */,
double shape_percentage /* = 0.975 */,
double texture_percentage /* = 0.975 */)
{
if(pts_files.size() != img_files.size())
{
fprintf(stderr, "ERROE(%s, %d): #Shapes != #Images\n",
__FILE__, __LINE__);
exit(0);
}
printf("################################################\n");
printf("Build Inverse Compositional Image Alignmennt Model...\n");
std::vector<AAM_Shape> AllShapes;
for(int ii = 0; ii < pts_files.size(); ii++)
{
AAM_Shape Shape;
bool flag = Shape.ReadAnnotations(pts_files[ii]);
if(!flag)
{
IplImage* image = cvLoadImage(img_files[ii].c_str(), -1);
Shape.ScaleXY(image->width, image->height);
cvReleaseImage(&image);
}
AllShapes.push_back(Shape);
}
//building shape and texture distribution model
printf("Build point distribution model...\n");
__shape.Train(AllShapes, scale, shape_percentage);
printf("Build warp information of mean shape mesh...");
__Points = cvCreateMat (1, __shape.nPoints(), CV_32FC2);
__Storage = cvCreateMemStorage(0);
double sp = 1.0;
//if(__shape.GetMeanShape().GetWidth() > 48)
// sp = 48/__shape.GetMeanShape().GetWidth();
__paw.Train(__shape.GetMeanShape()*sp, __Points, __Storage);
printf("[%d by %d, triangles #%d, pixels #%d*3]\n",
__paw.Width(), __paw.Height(), __paw.nTri(), __paw.nPix());
printf("Build texture distribution model...\n");
__texture.Train(pts_files, img_files, __paw, texture_percentage, true);
//calculate gradient of texture
printf("Calculating texture gradient...\n");
CvMat* dTx = cvCreateMat(1, __texture.nPixels(), CV_64FC1);
CvMat* dTy = cvCreateMat(1, __texture.nPixels(), CV_64FC1);
CalcTexGrad(__texture.GetMean(), dTx, dTy);
// save gradient image
mkdir("Modes");
__paw.SaveWarpTextureToImage("Modes/dTx.jpg", dTx);
__paw.SaveWarpTextureToImage("Modes/dTy.jpg", dTy);
//calculate warp Jacobian at base shape
printf("Calculating warp Jacobian...\n");
CvMat* Jx = cvCreateMat(__paw.nPix(), __shape.nModes()+4, CV_64FC1);
CvMat* Jy = cvCreateMat(__paw.nPix(), __shape.nModes()+4, CV_64FC1);
CalcWarpJacobian(Jx,Jy);
//calculate modified steepest descent image
printf("Calculating steepest descent images...\n");
CvMat* SD = cvCreateMat(__shape.nModes()+4, __texture.nPixels(), CV_64FC1);
CalcModifiedSD(SD, dTx, dTy, Jx, Jy);
//calculate inverse Hessian matrix
printf("Calculating Hessian inverse matrix...\n");
CvMat* H = cvCreateMat(__shape.nModes()+4, __shape.nModes()+4, CV_64FC1);
CalcHessian(H, SD);
//calculate update matrix (multiply inverse Hessian by modified steepest descent image)
__G = cvCreateMat(__shape.nModes()+4, __texture.nPixels(), CV_64FC1);
cvMatMul(H, SD, __G);
//release
cvReleaseMat(&Jx);
cvReleaseMat(&Jy);
cvReleaseMat(&dTx);
cvReleaseMat(&dTy);
cvReleaseMat(&SD);
cvReleaseMat(&H);
//allocate memory for on-line fitting stuff
__update_s0 = cvCreateMat(1, __shape.nPoints()*2, CV_64FC1);
__inv_pq = cvCreateMat(1, __shape.nModes()+4, CV_64FC1);
__warp_t = cvCreateMat(1, __texture.nPixels(), CV_64FC1);
__error_t = cvCreateMat(1, __texture.nPixels(), CV_64FC1);
__search_pq = cvCreateMat(1, __shape.nModes()+4, CV_64FC1);
__delta_pq = cvCreateMat(1, __shape.nModes()+4, CV_64FC1);
__current_s = cvCreateMat(1, __shape.nPoints()*2, CV_64FC1);
__update_s = cvCreateMat(1, __shape.nPoints()*2, CV_64FC1);
__lamda = cvCreateMat(1, __texture.nModes(), CV_64FC1);
//......... part of the code omitted here .........
Example 11: cvLoadImage
//============================================================================
void AAM_CAM::Train(const file_lists& pts_files,
const file_lists& img_files,
double scale /* = 1.0 */,
double shape_percentage /* = 0.975 */,
double texture_percentage /* = 0.975 */,
double appearance_percentage /* = 0.975 */)
{
//building shape and texture distribution model
std::vector<AAM_Shape> AllShapes;
for(int ii = 0; ii < pts_files.size(); ii++)
{
AAM_Shape Shape;
bool flag = Shape.ReadAnnotations(pts_files[ii]);
if(!flag)
{
IplImage* image = cvLoadImage(img_files[ii].c_str(), -1);
Shape.ScaleXY(image->width, image->height);
cvReleaseImage(&image);
}
AllShapes.push_back(Shape);
}
printf("Build point distribution model...\n");
__shape.Train(AllShapes, scale, shape_percentage);
printf("Build warp information of mean shape mesh...");
__Points = cvCreateMat (1, __shape.nPoints(), CV_32FC2);
__Storage = cvCreateMemStorage(0);
AAM_Shape refShape = __shape.__AAMRefShape/* * scale */;
//if(refShape.GetWidth() > 50)
// refShape.Scale(50/refShape.GetWidth());
__paw.Train(refShape, __Points, __Storage);
printf("[%d by %d, %d triangles, %d*3 pixels]\n",
__paw.Width(), __paw.Height(), __paw.nTri(), __paw.nPix());
printf("Build texture distribution model...\n");
__texture.Train(pts_files, img_files, __paw, texture_percentage, true);
__pq = cvCreateMat(1, __shape.nModes()+4, CV_64FC1);
printf("Build combined appearance model...\n");
int nsamples = pts_files.size();
int npointsby2 = __shape.nPoints()*2;
int npixels = __texture.nPixels();
int nfeatures = __shape.nModes() + __texture.nModes();
CvMat* AllAppearances = cvCreateMat(nsamples, nfeatures, CV_64FC1);
CvMat* s = cvCreateMat(1, npointsby2, CV_64FC1);
CvMat* t = cvCreateMat(1, npixels, CV_64FC1);
__MeanS = cvCreateMat(1, npointsby2, CV_64FC1);
__MeanG = cvCreateMat(1, npixels, CV_64FC1);
cvCopy(__shape.GetMean(), __MeanS);
cvCopy(__texture.GetMean(), __MeanG);
//calculate ratio of shape to appearance
CvScalar Sum1 = cvSum(__shape.__ShapesEigenValues);
CvScalar Sum2 = cvSum(__texture.__TextureEigenValues);
__WeightsS2T = sqrt(Sum2.val[0] / Sum1.val[0]);
printf("Combine shape and texture parameters...\n");
for(int i = 0; i < nsamples; i++)
{
//Get Shape and Texture respectively
IplImage* image = cvLoadImage(img_files[i].c_str(), -1);
AAM_Shape Shape;
if(!Shape.ReadAnnotations(pts_files[i]))
Shape.ScaleXY(image->width, image->height);
Shape.Point2Mat(s);
AAM_Common::CheckShape(s, image->width, image->height);
__paw.CalcWarpTexture(s, image, t);
__texture.NormalizeTexture(__MeanG, t);
//combine shape and texture parameters
CvMat OneAppearance;
cvGetRow(AllAppearances, &OneAppearance, i);
ShapeTexture2Combined(s, t, &OneAppearance);
cvReleaseImage(&image);
}
//Do PCA of appearances
DoPCA(AllAppearances, appearance_percentage);
int np = __AppearanceEigenVectors->rows;
printf("Extracting the shape and texture part of the combined eigen vectors..\n");
// extract the shape part of the combined eigen vectors
CvMat Ps;
cvGetCols(__AppearanceEigenVectors, &Ps, 0, __shape.nModes());
__Qs = cvCreateMat(np, npointsby2, CV_64FC1);
cvMatMul(&Ps, __shape.GetBases(), __Qs);
cvConvertScale(__Qs, __Qs, 1.0/__WeightsS2T);
// extract the texture part of the combined eigen vectors
CvMat Pg;
cvGetCols(__AppearanceEigenVectors, &Pg, __shape.nModes(), nfeatures);
__Qg = cvCreateMat(np, npixels, CV_64FC1);
//......... part of the code omitted here .........
Example 12: Fit
//============================================================================
void AAM_Basic::Fit(const IplImage* image, AAM_Shape& Shape,
int max_iter /* = 30 */,bool showprocess /* = false */)
{
//initialize some stuff
double t = gettime;
double e1, e2;
const int np = 5;
double k_values[np] = {1, 0.5, 0.25, 0.125, 0.0625};
int k;
IplImage* Drawimg = 0;
Shape.Point2Mat(__s);
InitParams(image);
CvMat subcq;
cvGetCols(__current_c_q, &subcq, 0, 4); cvCopy(__q, &subcq);
cvGetCols(__current_c_q, &subcq, 4, 4+__cam.nModes()); cvCopy(__c, &subcq);
//calculate error
e1 = EstResidual(image, __current_c_q, __s, __t_m, __t_s, __delta_t);
//do a number of iteration until convergence
for(int iter = 0; iter <max_iter; iter++)
{
if(showprocess)
{
if(Drawimg == 0) Drawimg = cvCloneImage(image);
else cvCopy(image, Drawimg);
__cam.CalcShape(__s, __current_c_q);
Shape.Mat2Point(__s);
Draw(Drawimg, Shape, 2);
#ifdef TARGET_WIN32
mkdir("result");
#else
mkdir("result", S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
#endif
char filename[100];
sprintf(filename, "result/ter%d.bmp", iter);
cvSaveImage(filename, Drawimg);
}
// predict parameter update
cvGEMM(__delta_t, __G, 1, NULL, 0, __delta_c_q, CV_GEMM_B_T);
//force first iteration
if(iter == 0)
{
cvAdd(__current_c_q, __delta_c_q, __current_c_q);
CvMat c; cvGetCols(__current_c_q, &c, 4, 4+__cam.nModes());
//constrain parameters
__cam.Clamp(&c);
e1 = EstResidual(image, __current_c_q, __s, __t_m, __t_s, __delta_t);
}
//find largest step size which reduces texture EstResidual
else
{
for(k = 0; k < np; k++)
{
cvScaleAdd(__delta_c_q, cvScalar(k_values[k]), __current_c_q, __update_c_q);
//constrain parameters
CvMat c; cvGetCols(__update_c_q, &c, 4, 4+__cam.nModes());
__cam.Clamp(&c);
e2 = EstResidual(image, __update_c_q, __s, __t_m, __t_s, __delta_t);
if(e2 <= e1) break;
}
}
//check for convergence
if(iter > 0)
{
if(k == np)
{
e1 = e2;
cvCopy(__update_c_q, __current_c_q);
}
else if(fabs(e2-e1)<0.001*e1) break;
else if (cvNorm(__delta_c_q)<0.001) break;
else
{
cvCopy(__update_c_q, __current_c_q);
e1 = e2;
}
}
}
cvReleaseImage(&Drawimg);
__cam.CalcShape(__s, __current_c_q);
Shape.Mat2Point(__s);
t = gettime - t;
printf("AAM-Basic Fitting time cost: %.3f millisec\n", t);
}
Example 13: cvCreateMat
//============================================================================
void AAM_Basic::CalcJacobianMatrix(const file_lists& pts_files,
const file_lists& img_files,
double disp_scale /* = 0.2 */,
double disp_angle /* = 20 */,
double disp_trans /* = 5.0 */,
double disp_std /* = 1.0 */,
int nExp /* = 30 */)
{
CvMat* J = cvCreateMat(__cam.nModes()+4, __cam.__texture.nPixels(), CV_64FC1);
CvMat* d = cvCreateMat(1, __cam.nModes()+4, CV_64FC1);
CvMat* o = cvCreateMat(1, __cam.nModes()+4, CV_64FC1);
CvMat* oo = cvCreateMat(1, __cam.nModes()+4, CV_64FC1);
CvMat* t = cvCreateMat(1, __cam.__texture.nPixels(), CV_64FC1);
CvMat* t_m = cvCreateMat(1, __cam.__texture.nPixels(), CV_64FC1);
CvMat* t_s = cvCreateMat(1, __cam.__texture.nPixels(), CV_64FC1);
CvMat* t1 = cvCreateMat(1, __cam.__texture.nPixels(), CV_64FC1);
CvMat* t2 = cvCreateMat(1, __cam.__texture.nPixels(), CV_64FC1);
CvMat* u = cvCreateMat(1, __cam.nModes()+4, CV_64FC1);
CvMat* c = cvCreateMat(1, __cam.nModes(), CV_64FC1);
CvMat* s = cvCreateMat(1, __cam.__shape.nPoints()*2, CV_64FC1);
CvMat* q = cvCreateMat(1, 4, CV_64FC1);
CvMat* p = cvCreateMat(1, __cam.__shape.nModes(),CV_64FC1);
CvMat* lamda = cvCreateMat(1, __cam.__texture.nModes(), CV_64FC1);
double theta = disp_angle * CV_PI / 180;
double aa = MAX(fabs(disp_scale*cos(theta)), fabs(disp_scale*sin(theta)));
cvmSet(d,0,0,aa); cvmSet(d,0,1,aa); cvmSet(d,0,2,disp_trans); cvmSet(d,0,3,disp_trans);
for(int nmode = 0; nmode < __cam.nModes(); nmode++)
cvmSet(d,0,4+nmode,disp_std*sqrt(__cam.Var(nmode)));
srand(unsigned(time(0)));
cvSetZero(u);cvSetZero(J);
for(int i = 0; i < pts_files.size(); i++)
{
IplImage* image = cvLoadImage(img_files[i].c_str(), -1);
AAM_Shape Shape;
if(!Shape.ReadAnnotations(pts_files[i]))
Shape.ScaleXY(image->width, image->height);
Shape.Point2Mat(s);
//calculate current texture vector
__cam.__paw.CalcWarpTexture(s, image, t);
__cam.__texture.NormalizeTexture(__cam.__MeanG, t);
//calculate appearance parameters
__cam.__shape.CalcParams(s, p, q);
__cam.__texture.CalcParams(t, lamda);
__cam.CalcParams(c, p, lamda);
//update appearance and pose parameters
CvMat subo;
cvGetCols(o, &subo, 0, 4); cvCopy(q, &subo);
cvGetCols(o, &subo, 4, 4+__cam.nModes()); cvCopy(c, &subo);
//get optimal EstResidual
EstResidual(image, o, s, t_m, t_s, t1);
for(int j = 0; j < nExp; j++)
{
printf("Pertubing (%d/%d) for image (%d/%d)...\r", j, nExp, i, pts_files.size());
for(int l = 0; l < 4+__cam.nModes(); l++)
{
double D = cvmGet(d,0,l);
double v = rand_in_between(-D, D);
cvCopy(o, oo); CV_MAT_ELEM(*oo,double,0,l) += v;
EstResidual(image, oo, s, t_m, t_s, t2);
cvSub(t1, t2, t2);
cvConvertScale(t2, t2, 1.0/v);
//accumulate into l-th row
CvMat Jl; cvGetRow(J, &Jl, l);
cvAdd(&Jl, t2, &Jl);
CV_MAT_ELEM(*u, double, 0, l) += 1.0;
}
}
cvReleaseImage(&image);
}
//normalize
for(int l = 0; l < __cam.nModes()+4; l++)
{
CvMat Jl; cvGetRow(J, &Jl, l);
cvConvertScale(&Jl, &Jl, 1.0/cvmGet(u,0,l));
}
CvMat* JtJ = cvCreateMat(__cam.nModes()+4, __cam.nModes()+4, CV_64FC1);
CvMat* InvJtJ = cvCreateMat(__cam.nModes()+4, __cam.nModes()+4, CV_64FC1);
cvGEMM(J, J, 1, NULL, 0, JtJ, CV_GEMM_B_T);
cvInvert(JtJ, InvJtJ, CV_SVD);
cvMatMul(InvJtJ, J, __G);
cvReleaseMat(&J); cvReleaseMat(&d); cvReleaseMat(&o);
cvReleaseMat(&oo); cvReleaseMat(&t); cvReleaseMat(&t_s);
cvReleaseMat(&t_m); cvReleaseMat(&t1); cvReleaseMat(&t2);
cvReleaseMat(&u); cvReleaseMat(&c); cvReleaseMat(&s);
cvReleaseMat(&q); cvReleaseMat(&p); cvReleaseMat(&lamda);
//......... part of the code omitted here .........
Example 14: DrawAppearance
//============================================================================
void AAM_Basic::DrawAppearance(IplImage* image)
{
AAM_Shape Shape; Shape.Mat2Point(__current_s);
AAM_PAW paw;
paw.Train(Shape, __cam.__Points, __cam.__Storage, __cam.__paw.GetTri(), false);
int x1, x2, y1, y2, idx1, idx2;
int tri_idx, v1, v2, v3;
int xby3, idxby3;
int minx, miny, maxx, maxy;
AAM_Shape refShape; refShape.Mat2Point(__cam.__MeanS);
refShape.Translate(-refShape.MinX(), -refShape.MinY());
double minV, maxV;
cvMinMaxLoc(__t_m, &minV, &maxV);
cvConvertScale(__t_m, __t_m, 255/(maxV-minV), -minV*255/(maxV-minV));
byte* pimg;
double* fastt = __t_m->data.db;
minx = Shape.MinX(); miny = Shape.MinY();
maxx = Shape.MaxX(); maxy = Shape.MaxY();
if( minx < 0 )
minx = 0;
else if(minx >= image->width)
minx = image->width - 1;
if( miny < 0 )
miny = 0;
else if(miny >= image->height)
miny = image->height - 1;
if( maxx < 0 )
maxx = 0;
else if(maxx >= image->width)
maxx = image->width - 1;
if( maxy < 0 )
maxy = 0;
else if(maxy >= image->height)
maxy = image->height - 1;
for(int y = miny; y < maxy; y++)
{
y1 = y-miny;
pimg = (byte*)(image->imageData + image->widthStep*y);
for(int x = minx; x < maxx; x++)
{
x1 = x-minx;
idx1 = paw.__rect[y1][x1];
if(idx1 >= 0)
{
tri_idx = paw.PixTri(idx1);
v1 = paw.Tri(tri_idx, 0);
v2 = paw.Tri(tri_idx, 1);
v3 = paw.Tri(tri_idx, 2);
x2 = paw.__alpha[idx1]*refShape[v1].x + paw.__belta[idx1]*refShape[v2].x +
paw.__gamma[idx1]*refShape[v3].x;
y2 = paw.__alpha[idx1]*refShape[v1].y + paw.__belta[idx1]*refShape[v2].y +
paw.__gamma[idx1]*refShape[v3].y;
xby3 = 3*x;
idx2 = __cam.__paw.__rect[y2][x2]; idxby3 = 3*idx2;
pimg[xby3] = fastt[idxby3];
pimg[xby3+1] = fastt[idxby3+1];
pimg[xby3+2] = fastt[idxby3+2];
}
}
}
}
Example 15: Fit
//============================================================================
int AAM_Basic::Fit(const IplImage* image, AAM_Shape& Shape,
int max_iter /* = 30 */,bool showprocess /* = false */)
{
//initialize some stuff
double t = curtime;
double e1, e2, e3;
double k_v[6] = {-1,-1.15,-0.7,-0.5,-0.2,-0.0625};
Shape.Point2Mat(__current_s);
InitParams(image, __current_s, __current_c);
__cam.__shape.CalcParams(__current_s, __p, __current_q);
cvZero(__current_c);
IplImage* Drawimg =
cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
//mkdir("result");
//char filename[100];
//calculate error
e3 = EstResidual(image, __current_c, __current_s, __delta_t);
if(e3 == -1) return 0;
int iter;
//do a number of iteration until convergence
for( iter = 0; iter <max_iter; iter++)
{
// predict pose and parameter update
// __delta_t is computed incorrectly. For debug printout see AAM_Shape::Mat2Point()
//cvGEMM(__delta_t, __Rq, 1, NULL, 0, __delta_q, CV_GEMM_B_T);
cvGEMM(__delta_t, __Rc, 1, NULL, 0, __delta_c, CV_GEMM_B_T);
// if the prediction above didn't improve the fit,
// try amplifying and later damping the prediction
for(int k = 0; k < 6; k++)
{
cvScaleAdd(__delta_q, cvScalar(k_v[k]), __current_q, __update_q);
cvScaleAdd(__delta_c, cvScalar(k_v[k]), __current_c, __update_c);
__cam.Clamp(__update_c);//constrain parameters
e2 = EstResidual(image, __update_c, __current_s, __delta_t);
if(k==0) e1 = e2;
else if(e2 != -1 && e2 < e1)break;
}
//check for convergence
if((iter>max_iter/3&&fabs(e2-e3)<0.01*e3) || e2<0.001 )
{
break;
}
else if (cvNorm(__delta_c)<0.001 && cvNorm(__delta_q)<0.001)
{
break;
}
else
{
cvCopy(__update_q, __current_q);
cvCopy(__update_c, __current_c);
e3 = e2;
}
}
__cam.CalcShape(__current_s, __current_c, __current_q);
Shape.Mat2Point(__current_s);
t = curtime - t;
if( AAM_DEBUG_MODE ) printf("AAM-Basic Fitting time cost: %.3f\n", t);
return iter;
}