当前位置: 首页>>代码示例>>C++>>正文


C++ VO_Shape::SetA2DPoint方法代码示例

本文整理汇总了C++中VO_Shape::SetA2DPoint方法的典型用法代码示例。如果您正苦于以下问题:C++ VO_Shape::SetA2DPoint方法的具体用法?C++ VO_Shape::SetA2DPoint怎么用?C++ VO_Shape::SetA2DPoint使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在VO_Shape的用法示例。


在下文中一共展示了VO_Shape::SetA2DPoint方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。

示例1:

/**
 * @author     JIA Pei
 * @version    2010-06-07
 * @brief      Constrain every point of the shape respectively
 * @param      ioShape      Input and Output - the shape whose points are clamped in place
*/
void VO_Point2DDistributionModel::VO_ConstrainAllPoints(VO_Shape& ioShape)
{
    const unsigned int nbOfPoints = ioShape.GetNbOfPoints();

    for(unsigned int idx = 0; idx < nbOfPoints; ++idx)
    {
        // Read the point, constrain it against its trained normalized
        // ellipse, then write the clamped coordinates back into the shape.
        Point2f constrainedPt = ioShape.GetA2DPoint(idx);
        VO_Point2DDistributionModel::VO_ConstrainSinglePoint( constrainedPt, this->m_VONormalizedEllipses[idx] );
        ioShape.SetA2DPoint(constrainedPt, idx);
    }
}
开发者ID:haifaben,项目名称:vosm,代码行数:18,代码来源:VO_Point2DDistributionModel.cpp

示例2: VO_CMUInverseCompositional

/**
 * @author      Yao Wei
 * @brief       CMU Inverse Compositional !!
 * @param       matDeltaP       Input -- delta p (shape parameter increments)
 * @param       matDeltaQ       Input -- delta q (global similarity parameter increments)
 * @param       s               Input -- the current shape estimate
 * @param       estShape        Output -- newly estimated shape by inverse composition
 */
void VO_FittingAAMInverseIA::VO_CMUInverseCompositional(const Mat_<float>& matDeltaP,
                                                        const Mat_<float>& matDeltaQ,
                                                        const VO_Shape& s,
                                                        VO_Shape& estShape)
{
    // Firstly: evaluate the incremental warp on the base mesh.
    VO_Shape S0;
    this->VO_PParamQParam2ModelAlignedShape( matDeltaP, matDeltaQ, S0);
//    cvConvertScale(dpq, __inv_pq, -1);
//    __shape.CalcShape(__inv_pq, __update_s0);    // __update_s0 = N.W(s0, -delta_p, -delta_q)

    //Secondly: Composing the Incremental Warp with the Current Warp Estimate.
    Point2f res, tmp;
    int count = 0;
    vector<unsigned int> vertexIdxes;

    for(unsigned int i = 0; i < this->m_VOAAMInverseIA->m_iNbOfPoints; i++)
    {
        res.x = 0.0;    res.y = 0.0;
        count = 0;
        //The only problem with this approach is which triangle do we use?
        //In general there will be several triangles that share the i-th vertex.
        for(unsigned j = 0; j < this->m_VOAAMInverseIA->m_iNbOfTriangles; j++)    // see Figure (11)
        {
            if ( this->m_vTriangle2D[j].HasNode(i) )
            {
                vertexIdxes = this->m_vTriangle2D[j].GetVertexIndexes();

                // Warp S0's i-th point through triangle j of the current shape s.
                VO_WarpingPoint::WarpOnePoint(  S0.GetA2DPoint(i),
                                                this->m_vTriangle2D[j], 
                                                tmp,
                                                s.GetA2DPoint(vertexIdxes[0]),
                                                s.GetA2DPoint(vertexIdxes[1]),
                                                s.GetA2DPoint(vertexIdxes[2]) );
                res.x += tmp.x;
                res.y += tmp.y;
                count++;
            }
        }
        // average the result so as to smooth the warp at each vertex
        if(count == 0)
        {
            // BUGFIX: the original code fell through and divided by zero here,
            // writing Inf/NaN coordinates into estShape. If no triangle shares
            // this vertex, warn and fall back to the un-composed point instead.
            cerr << "There must be something wrong when CMU Inverse Compositional !" << endl;
            estShape.SetA2DPoint(S0.GetA2DPoint(i), i);
        }
        else
        {
            res.x /= count;
            res.y /= count;
            estShape.SetA2DPoint(res, i);
        }
    }
}
开发者ID:cDoru,项目名称:face,代码行数:54,代码来源:VO_FittingAAMInverseIA.cpp

示例3: assert

/**
 * @brief   Estimate face absolute orientations via OpenCV's POSIT algorithm,
 *          and project the 3D model points back to 2D under the estimated pose.
 * @param   iShape2D    Input  - observed 2D shape
 * @param   iShape3D    Input  - corresponding 3D model shape
 * @param   oShape2D    Output - 2D projection of the model under the estimated pose
 * @return  Euler angles {yaw, pitch, roll} derived from the POSIT rotation matrix
 */
vector<float> CRecognitionAlgs::CalcAbsoluteOrientations(
    const VO_Shape& iShape2D,
    const VO_Shape& iShape3D,
    VO_Shape& oShape2D)
{
    assert (iShape2D.GetNbOfPoints() == iShape3D.GetNbOfPoints() );
    unsigned int NbOfPoints = iShape3D.GetNbOfPoints();
    Point3f pt3d;
    Point2f pt2d;
    // Scale the 2D shape to the 3D shape's height so both live on the same scale.
    float height1 = iShape2D.GetHeight();
    float height2 = iShape3D.GetHeight();
    VO_Shape tempShape2D = iShape2D;
    tempShape2D.Scale(height2/height1);

    //Create the model points
    std::vector<CvPoint3D32f> modelPoints;
    for(unsigned int i = 0; i < NbOfPoints; ++i)
    {
        pt3d = iShape3D.GetA3DPoint(i);
        modelPoints.push_back(cvPoint3D32f(pt3d.x, pt3d.y, pt3d.z));
    }

    //Create the image points
    std::vector<CvPoint2D32f> srcImagePoints;
    for(unsigned int i = 0; i < NbOfPoints; ++i)
    {
        pt2d = tempShape2D.GetA2DPoint(i);
        srcImagePoints.push_back(cvPoint2D32f(pt2d.x, pt2d.y));
    }

    //Create the POSIT object with the model points
    CvPOSITObject *positObject = cvCreatePOSITObject( &modelPoints[0], NbOfPoints );

    //Estimate the pose
    CvMatr32f rotation_matrix = new float[9];
    CvVect32f translation_vector = new float[3];
    CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1.0e-4f);
    cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );

    //rotation_matrix to Euler angles, refer to VO_Shape::GetRotation
    float sin_beta  = -rotation_matrix[0 * 3 + 2];
    float tan_alpha = rotation_matrix[1 * 3 + 2] / rotation_matrix[2 * 3 + 2];
    float tan_gamma = rotation_matrix[0 * 3 + 1] / rotation_matrix[0 * 3 + 0];

    //Project the model points with the estimated pose
    oShape2D = tempShape2D;
    for ( unsigned int i=0; i < NbOfPoints; ++i )
    {
        pt3d.x = rotation_matrix[0] * modelPoints[i].x +
            rotation_matrix[1] * modelPoints[i].y +
            rotation_matrix[2] * modelPoints[i].z +
            translation_vector[0];
        pt3d.y = rotation_matrix[3] * modelPoints[i].x +
            rotation_matrix[4] * modelPoints[i].y +
            rotation_matrix[5] * modelPoints[i].z +
            translation_vector[1];
        pt3d.z = rotation_matrix[6] * modelPoints[i].x +
            rotation_matrix[7] * modelPoints[i].y +
            rotation_matrix[8] * modelPoints[i].z +
            translation_vector[2];
        if ( pt3d.z != 0 )
        {
            pt2d.x = FOCAL_LENGTH * pt3d.x / pt3d.z;
            pt2d.y = FOCAL_LENGTH * pt3d.y / pt3d.z;
        }
        // NOTE(review): when pt3d.z == 0, pt2d retains the previous point's
        // projection (original behavior, preserved here) - confirm intended.
        oShape2D.SetA2DPoint(pt2d, i);
    }

    // BUGFIX: the original leaked the POSIT object and both pose buffers
    // on every call; release them after their last use above.
    cvReleasePOSITObject( &positObject );
    delete [] rotation_matrix;
    delete [] translation_vector;

    //return Euler angles
    vector<float> pos(3);
    pos[0] = atan(tan_alpha);    // yaw
    pos[1] = asin(sin_beta);     // pitch
    pos[2] = atan(tan_gamma);    // roll
    return pos;
}
开发者ID:HVisionSensing,项目名称:mc-vosm,代码行数:76,代码来源:VO_RecognitionAlgs.cpp

示例4: UpdateShape

/**
 * @author      YAO Wei, JIA Pei
 * @version     2010-05-20
 * @brief       Find the best offset for each point along its profile normal
 *              and move the shape points accordingly.
 * @param       asmmodel        Input - the ASM model
 * @param       iImg            Input - image to be fitted
 * @param       ioShape         Input and output - the input and output shape
 * @param       iShapeInfo      Input - the shape information (per-point neighbour connectivity)
 * @param       iMean           Input - mean profile
 * @param       iCovInverse     Input - covariance inverse
 * @param       offSetTolerance Input - offset tolerance, which is used to determine whether this point is converged or not
 * @param       profdim         Input - specify the dimension that is going to be used when updating shape.
 *                              Sometimes, the trained data is of 4D profiles, but the user may only use 1D to test.
 * @return      presumably the count of "good" (converged) landmarks accumulated in
 *              nGoodLandmarks - the return statement lies beyond this excerpt; confirm in full source.
 * @note        Refer to "AAM Revisited, page 34, figure 13", particularly, those steps.
*/
int VO_FittingASMNDProfiles::UpdateShape(   const VO_ASMNDProfiles* asmmodel,
                                            const cv::Mat& iImg,
                                            VO_Shape& ioShape,
                                            const std::vector<VO_Shape2DInfo>& iShapeInfo,
                                            const std::vector< VO_Profile >& iMean,
                                            const std::vector< std::vector< cv::Mat_<float> > >& iCovInverse,
                                            unsigned int offSetTolerance,
                                            unsigned int profdim)
{
    int nGoodLandmarks = 0;
    // One best-offset slot per profile dimension; only slot 0 is used below.
    std::vector<int> nBestOffset(profdim, 0);
    unsigned int NbOfPoints     = ioShape.GetNbOfPoints();
    unsigned int NbOfShapeDim   = ioShape.GetNbOfDim();
    unsigned int ProfileLength    = iMean[0].GetProfileLength();
    //std::vector<float> dists(NbOfPoints, 0.0f);
    cv::Point2f pt;

    // Take care of the 1st direction first.
    for (unsigned int i = 0; i < NbOfPoints; i++)
    {
        /////////////////////////////////////////////////////////////////////////////
        ///Calculate profile norm direction//////////////////////////////////////////
        /** Here, this is not compatible with 3D */
        // Neighbouring points define the local contour direction at point i.
        cv::Point2f PrevPoint = ioShape.GetA2DPoint ( iShapeInfo[i].GetFrom() );
        cv::Point2f ThisPoint = ioShape.GetA2DPoint ( i );
        cv::Point2f NextPoint = ioShape.GetA2DPoint ( iShapeInfo[i].GetTo() );

        float deltaX, deltaY;
        float normX, normY;
        float sqrtsum;
        float bestXOffset, bestYOffset;

        // left side (connected from side)
        deltaX = ThisPoint.x - PrevPoint.x;
        deltaY = ThisPoint.y - PrevPoint.y;
        sqrtsum = sqrt ( deltaX*deltaX + deltaY*deltaY );
        if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;      // guard against degenerate (coincident) points
        deltaX /= sqrtsum; deltaY /= sqrtsum;         // Normalize
        // Firstly, normX normY record left side norm.
        normX = -deltaY;
        normY = deltaX;

        // right side (connected to side)
        deltaX = NextPoint.x - ThisPoint.x;
        deltaY = NextPoint.y - ThisPoint.y;
        sqrtsum = sqrt ( deltaX*deltaX + deltaY*deltaY );
        if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;
        deltaX /= sqrtsum; deltaY /= sqrtsum;         // Normalize
        // Secondly, normX normY will average both left side and right side norm.
        normX += -deltaY;
        normY += deltaX;

        // Average left right side
        sqrtsum = sqrt ( normX*normX + normY*normY );
        if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;
        normX /= sqrtsum;
        normY /= sqrtsum;                             // Final Normalize
        /////////////////////////////////////////////////////////////////////////////

        // Search along the normal (whisker) for the profile best matching the trained mean.
        nBestOffset[0] = VO_FittingASMNDProfiles::VO_FindBestMatchingProfile1D( iImg,
                                                                                ThisPoint,
                                                                                iMean[i].Get1DimProfile(0),
                                                                                iCovInverse[i][0],
                                                                                ProfileLength,
                                                                                offSetTolerance,
                                                                                normX,
                                                                                normY);

        // set OutShape(iPoint) to best offset from current position
        // one dimensional profile: must move point along the whisker
        bestXOffset = nBestOffset[0] * normX;
        bestYOffset = nBestOffset[0] * normY;
        pt.x = ThisPoint.x + bestXOffset;
        pt.y = ThisPoint.y + bestYOffset;
        ioShape.SetA2DPoint(pt, i);
        //dists[i] = sqrt( pow( (double)bestXOffset, 2.0) + pow( (double)bestYOffset, 2.0) );

        //if (abs(nBestOffset[0]) <= offSetTolerance/2)
        if(profdim == 1)
        {
            // A landmark counts as "good" if it barely moved along the whisker.
            if (abs(nBestOffset[0]) <= 1)
                nGoodLandmarks++;
        }
    }
    // NOTE(review): the source excerpt is truncated here - the remainder of this
    // function (handling of profdim > 1 and the return) is not shown above.
//.........这里部分代码省略.........
开发者ID:jiapei100,项目名称:VOSM,代码行数:101,代码来源:VO_FittingASMNDProfiles.cpp


注:本文中的VO_Shape::SetA2DPoint方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。