This article collects typical usage examples of the C++ function cvPoint2D32f. If you have been wondering what cvPoint2D32f does, how to call it, and what real code using it looks like, the curated examples below should help.
The following shows 15 code examples of the cvPoint2D32f function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
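Before the examples, a quick reference: cvPoint2D32f is an inline constructor from OpenCV's legacy C API (declared in opencv2/core/types_c.h in OpenCV 2.x) that builds a CvPoint2D32f, a point whose x and y coordinates are single-precision floats, from two double arguments. A minimal sketch of it in isolation:

#include <opencv2/core/core_c.h>
#include <stdio.h>

int main()
{
    // Construct a 32-bit float point; the double arguments are narrowed to float.
    CvPoint2D32f center = cvPoint2D32f(320.5, 240.5);
    printf("center = (%f, %f)\n", center.x, center.y);

    // Round back to an integer CvPoint when a drawing function needs one.
    CvPoint ipt = cvPointFrom32f(center);
    printf("rounded = (%d, %d)\n", ipt.x, ipt.y);
    return 0;
}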
Example 1: AffineTransformPatch
void AffineTransformPatch(IplImage* src, IplImage* dst, CvAffinePose pose)
{
    CvRect src_large_roi = cvGetImageROI(src);

    IplImage* temp = cvCreateImage(cvSize(src_large_roi.width, src_large_roi.height),
                                   IPL_DEPTH_32F, src->nChannels);
    cvSetZero(temp);
    IplImage* temp2 = cvCloneImage(temp);
    CvMat* rotation_phi = cvCreateMat(2, 3, CV_32FC1);

    CvSize new_size = cvSize(temp->width*pose.lambda1, temp->height*pose.lambda2);
    IplImage* temp3 = cvCreateImage(new_size, IPL_DEPTH_32F, src->nChannels);

    cvConvertScale(src, temp);
    cvResetImageROI(temp);

    // First rotation: by pose.phi about the patch center.
    cv2DRotationMatrix(cvPoint2D32f(temp->width/2, temp->height/2), pose.phi, 1.0, rotation_phi);
    cvWarpAffine(temp, temp2, rotation_phi);

    cvSetZero(temp);

    // Anisotropic scaling by (lambda1, lambda2) via resize, then rotation by theta - phi.
    cvResize(temp2, temp3);
    cv2DRotationMatrix(cvPoint2D32f(temp3->width/2, temp3->height/2), pose.theta - pose.phi, 1.0, rotation_phi);
    cvWarpAffine(temp3, temp, rotation_phi);

    // Crop the central half of the transformed patch into dst.
    cvSetImageROI(temp, cvRect(temp->width/2 - src_large_roi.width/4,
                               temp->height/2 - src_large_roi.height/4,
                               src_large_roi.width/2, src_large_roi.height/2));
    cvConvertScale(temp, dst);

    cvReleaseMat(&rotation_phi);
    cvReleaseImage(&temp3);
    cvReleaseImage(&temp2);
    cvReleaseImage(&temp);
}
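The pattern above, cv2DRotationMatrix with a cvPoint2D32f center followed by cvWarpAffine, is the standard legacy-API idiom for rotating an image about its center. A minimal self-contained sketch of just that idiom (the file names are placeholders, not part of the original example):

#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/highgui/highgui_c.h>

int main()
{
    IplImage* src = cvLoadImage("input.jpg");   // placeholder input image
    if (!src) return -1;

    IplImage* dst = cvCloneImage(src);
    CvMat* rot = cvCreateMat(2, 3, CV_32FC1);

    // 30-degree rotation about the image center, no scaling.
    cv2DRotationMatrix(cvPoint2D32f(src->width/2, src->height/2), 30.0, 1.0, rot);
    cvWarpAffine(src, dst, rot);

    cvSaveImage("rotated.jpg", dst);
    cvReleaseMat(&rot);
    cvReleaseImage(&dst);
    cvReleaseImage(&src);
    return 0;
}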
Example 2: the_car
void the_project::project_init()
{
    car_of_pro = new the_car();

    // camera 480*640
    for_cam = cvCreateCameraCapture(1);
    for_video = cvCreateFileCapture("test.avi");
    image_size = cvSize((int)cvGetCaptureProperty(for_cam, CV_CAP_PROP_FRAME_WIDTH),
                        (int)cvGetCaptureProperty(for_cam, CV_CAP_PROP_FRAME_HEIGHT));
    wr1 = cvCreateVideoWriter("record_ori.avi", CV_FOURCC('X','V','I','D'), 15, image_size);
    wr2 = cvCreateVideoWriter("record_cha.avi", CV_FOURCC('X','V','I','D'), 15, image_size);

    // Full-frame corner points: top-left, bottom-left, bottom-right, top-right.
    newpoints[0] = cvPoint2D32f(0, 0);
    newpoints[1] = cvPoint2D32f(0, image_size.height);
    newpoints[2] = cvPoint2D32f(image_size.width, image_size.height);
    newpoints[3] = cvPoint2D32f(image_size.width, 0);

    // Channel thresholds, presumably for red and green color detection.
    red_min = 200;
    rg_max = 100;
    rb_max = 100;
    green_min = 200;
    gb_max = 100;
    gr_max = 100;
}
Example 3: GenerateAffineTransformFromPose
void GenerateAffineTransformFromPose(CvSize size, CvAffinePose pose, CvMat* transform)
{
    CvMat* temp = cvCreateMat(3, 3, CV_32FC1);
    CvMat* final = cvCreateMat(3, 3, CV_32FC1);
    cvmSet(temp, 2, 0, 0.0f);
    cvmSet(temp, 2, 1, 0.0f);
    cvmSet(temp, 2, 2, 1.0f);

    // 'rotation' is a 2x3 view into the top two rows of temp, so
    // cv2DRotationMatrix can write straight into the 3x3 homogeneous matrix.
    CvMat rotation;
    cvGetSubRect(temp, &rotation, cvRect(0, 0, 3, 2));

    // final = R(phi)
    cv2DRotationMatrix(cvPoint2D32f(size.width/2, size.height/2), pose.phi, 1.0, &rotation);
    cvCopy(temp, final);

    // temp = anisotropic scaling about the image center; final = S * R(phi)
    cvmSet(temp, 0, 0, pose.lambda1);
    cvmSet(temp, 0, 1, 0.0f);
    cvmSet(temp, 1, 0, 0.0f);
    cvmSet(temp, 1, 1, pose.lambda2);
    cvmSet(temp, 0, 2, size.width/2*(1 - pose.lambda1));
    cvmSet(temp, 1, 2, size.height/2*(1 - pose.lambda2));
    cvMatMul(temp, final, final);

    // final = R(theta - phi) * S * R(phi)
    cv2DRotationMatrix(cvPoint2D32f(size.width/2, size.height/2), pose.theta - pose.phi, 1.0, &rotation);
    cvMatMul(temp, final, final);

    // Copy the top 2x3 of the composed transform into the output.
    cvGetSubRect(final, &rotation, cvRect(0, 0, 3, 2));
    cvCopy(&rotation, transform);

    cvReleaseMat(&temp);
    cvReleaseMat(&final);
}
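A note on the structure: the three matrix steps compose the output as final = R(theta - phi) * S * R(phi), where R(a) rotates by a about the image center and S scales by (lambda1, lambda2) about the same center. That is the rotation-scale-rotation decomposition of an affine map, and it is the matrix form of exactly the warp sequence that AffineTransformPatch in Example 1 applies to the pixels directly.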
Example 4: ofxSurfObjCorners
int ofxSurfObjCorners(IpPairVec& matches, const ofPoint src_crn[4], ofPoint dst_crn[4]) {
    double h[9];
    CvMat _h = cvMat(3, 3, CV_64F, h);
    vector<CvPoint2D32f> pt1, pt2;
    CvMat _pt1, _pt2;

    int n = (int)matches.size();
    if (n < 4) return 0;   // a homography needs at least 4 point pairs

    pt1.resize(n);
    pt2.resize(n);
    for (int i = 0; i < n; i++) {
        pt1[i] = cvPoint2D32f(matches[i].second.x, matches[i].second.y);
        pt2[i] = cvPoint2D32f(matches[i].first.x, matches[i].first.y);
    }
    // Wrap the point vectors as 1xN two-channel float matrices
    // (CV_32FC2, not CV_32F, since each element is an (x, y) pair).
    _pt1 = cvMat(1, n, CV_32FC2, &pt1[0]);
    _pt2 = cvMat(1, n, CV_32FC2, &pt2[0]);

    //if (!cvFindHomography(&_pt1, &_pt2, &_h, CV_RANSAC, 5)) return 0;
    /*for (int i = 0; i < 4; i++) {
        double x = (double)src_crn[i].x;
        double y = (double)src_crn[i].y;
        double Z = 1./(h[6]*x + h[7]*y + h[8]);
        double X = (h[0]*x + h[1]*y + h[2])*Z;
        double Y = (h[3]*x + h[4]*y + h[5])*Z;
        dst_crn[i].set(cvRound(X), cvRound(Y));
    }*/
    return 1;
}
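Note that as the body stands the homography is never actually estimated: both the cvFindHomography call and the corner projection are commented out, so the function fills the point buffers and returns 1 without touching dst_crn. The commented block shows the intended final step: with h estimated, each source corner (x, y) maps to X = (h[0]x + h[1]y + h[2])/w and Y = (h[3]x + h[4]y + h[5])/w, where w = h[6]x + h[7]y + h[8] is the perspective divisor.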
Example 5: cvPoint2D32f
void SimpleImageProjector::project(IplImage* dst, A4PreciseDetectedRecord dstRecord)
{
    CvPoint2D32f dstCorners[4];
    dstCorners[0] = cvPoint2D32f(dstRecord.UL.x, dstRecord.UL.y);
    dstCorners[1] = cvPoint2D32f(dstRecord.UR.x, dstRecord.UR.y);
    dstCorners[2] = cvPoint2D32f(dstRecord.DL.x, dstRecord.DL.y);
    dstCorners[3] = cvPoint2D32f(dstRecord.DR.x, dstRecord.DR.y);

    // Map the projection's source corners (set up in the constructor,
    // see Example 8) onto the detected quad, then warp.
    cvGetPerspectiveTransform(corners, dstCorners, transformMat);
    cvWarpPerspective(projection, dst, transformMat, CV_INTER_LINEAR);
}
Example 6: cameraInfoCb
void cameraInfoCb(const sensor_msgs::CameraInfoConstPtr& msg) {
    // K is the row-major 3x3 intrinsic matrix:
    // fx = K[0], fy = K[4], cx = K[2], cy = K[5].
    cameraInfo.focalLength = cvPoint2D32f(msg->K.elems[0], msg->K.elems[4]);
    cameraInfo.imageHeight = msg->height;
    cameraInfo.imageWidth = msg->width;
    cameraInfo.pitch = (1.0)*3.14/180;   // 1 degree in radians (3.14 approximates pi)
    cameraInfo.yaw = 0;
    cameraInfo.opticalCenter = cvPoint2D32f(msg->K.elems[2], msg->K.elems[5]);
    cameraInfo.cameraHeight = 1430;      // presumably millimeters
    //camera_sub_.shutdown();
}
Example 7: cvPointFrom32f
void Lines::Line::drawInfiniteLine(IplImage* img, CvScalar color)
{
    // Extend the line far beyond the image bounds using y = Slope*x + Intercept.
    CvPoint pt1 = cvPointFrom32f(cvPoint2D32f(-1000, -1000*Slope + Intercept));
    CvPoint pt2 = cvPointFrom32f(cvPoint2D32f(1000, 1000*Slope + Intercept));
    if (isVertical == false)
        cvLine(img, pt1, pt2, color, 1, 8, 0);
    else
        cvLine(img, cvPoint(point1.x, 0), cvPoint(point1.x, 1000), color, 1, 8, 0);
}
Example 8: cvLoadImage
SimpleImageProjector::SimpleImageProjector(char* pathToProjection)
{
    projection = cvLoadImage(pathToProjection);
    if (projection == nullptr) {
        throw std::runtime_error("Can not load projection image");
    }
    transformMat = cvCreateMat(3, 3, CV_32FC1);

    // Source corners of the projection image: UL, UR, DL, DR.
    corners[0] = cvPoint2D32f(0, 0);
    corners[1] = cvPoint2D32f(projection->width - 1, 0);
    corners[2] = cvPoint2D32f(0, projection->height - 1);
    corners[3] = cvPoint2D32f(projection->width - 1, projection->height - 1);
}
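The corner bookkeeping in Examples 5 and 8 boils down to the cvGetPerspectiveTransform / cvWarpPerspective pair. A minimal self-contained sketch of that pair on its own (file names and quad coordinates are placeholders, not taken from the original class):

#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/highgui/highgui_c.h>

int main()
{
    IplImage* src = cvLoadImage("overlay.png");   // placeholder input
    if (!src) return -1;
    IplImage* dst = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
    cvSetZero(dst);

    CvPoint2D32f srcCorners[4], dstCorners[4];
    srcCorners[0] = cvPoint2D32f(0, 0);
    srcCorners[1] = cvPoint2D32f(src->width - 1, 0);
    srcCorners[2] = cvPoint2D32f(0, src->height - 1);
    srcCorners[3] = cvPoint2D32f(src->width - 1, src->height - 1);

    // Squeeze the image into an arbitrary quad inside the frame.
    dstCorners[0] = cvPoint2D32f(50, 30);
    dstCorners[1] = cvPoint2D32f(src->width - 80, 10);
    dstCorners[2] = cvPoint2D32f(20, src->height - 40);
    dstCorners[3] = cvPoint2D32f(src->width - 30, src->height - 60);

    CvMat* map = cvCreateMat(3, 3, CV_32FC1);
    cvGetPerspectiveTransform(srcCorners, dstCorners, map);
    cvWarpPerspective(src, dst, map, CV_INTER_LINEAR);

    cvSaveImage("warped.png", dst);
    cvReleaseMat(&map);
    cvReleaseImage(&dst);
    cvReleaseImage(&src);
    return 0;
}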
Example 9: cvInitSubdivDelaunay2D
CV_IMPL void
cvInitSubdivDelaunay2D( CvSubdiv2D* subdiv, CvRect rect )
{
    float big_coord = 3.f * MAX( rect.width, rect.height );
    CvPoint2D32f ppA, ppB, ppC;
    CvSubdiv2DPoint *pA, *pB, *pC;
    CvSubdiv2DEdge edge_AB, edge_BC, edge_CA;
    float rx = (float) rect.x;
    float ry = (float) rect.y;

    CV_FUNCNAME( "cvSubdivDelaunay2DInit" );

    __BEGIN__;

    if( !subdiv )
        CV_ERROR( CV_StsNullPtr, "" );

    cvClearSet( (CvSet *) (subdiv->edges) );
    cvClearSet( (CvSet *) subdiv );

    subdiv->quad_edges = 0;
    subdiv->recent_edge = 0;
    subdiv->is_geometry_valid = 0;

    subdiv->topleft = cvPoint2D32f( rx, ry );
    subdiv->bottomright = cvPoint2D32f( rx + rect.width, ry + rect.height );

    // Three far-away points forming a virtual "outer" triangle that
    // encloses the whole rect; all real points are inserted inside it.
    ppA = cvPoint2D32f( rx + big_coord, ry );
    ppB = cvPoint2D32f( rx, ry + big_coord );
    ppC = cvPoint2D32f( rx - big_coord, ry - big_coord );

    pA = cvSubdiv2DAddPoint( subdiv, ppA, 0 );
    pB = cvSubdiv2DAddPoint( subdiv, ppB, 0 );
    pC = cvSubdiv2DAddPoint( subdiv, ppC, 0 );

    edge_AB = cvSubdiv2DMakeEdge( subdiv );
    edge_BC = cvSubdiv2DMakeEdge( subdiv );
    edge_CA = cvSubdiv2DMakeEdge( subdiv );

    cvSubdiv2DSetEdgePoints( edge_AB, pA, pB );
    cvSubdiv2DSetEdgePoints( edge_BC, pB, pC );
    cvSubdiv2DSetEdgePoints( edge_CA, pC, pA );

    cvSubdiv2DSplice( edge_AB, cvSubdiv2DSymEdge( edge_CA ));
    cvSubdiv2DSplice( edge_BC, cvSubdiv2DSymEdge( edge_AB ));
    cvSubdiv2DSplice( edge_CA, cvSubdiv2DSymEdge( edge_BC ));

    subdiv->recent_edge = edge_AB;

    __END__;
}
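This one is OpenCV's own implementation of the legacy planar-subdivision initializer: the three points ppA, ppB and ppC, placed 3*max(width, height) away from the rect, form a bounding outer triangle inside which every later point insertion happens. Callers normally reach it through cvCreateSubdivDelaunay2D, which allocates the CvSubdiv2D and then invokes this initializer, as Example 13 below does.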
Example 10: glLoadIdentity
void StereoDisplay::Draw() // Draw Our Scene
{
    IplImage* camera_image;
    GLfloat z = -20.0;

    // Alternate between the right and left camera on each frame.
    if( show_right_ == TRUE)
        camera_image = camera1_->QueryFrame();
    else
        camera_image = camera0_->QueryFrame();
    show_right_ = !show_right_;

    glLoadIdentity();            // Reset The Modelview Matrix

    glBegin(GL_QUADS);           // Begin drawing the image texture
    // Front Face
    glTexCoord2f(1.0f, 1.0f); glVertex3f( 11.0f,  8.3f, z);
    glTexCoord2f(0.0f, 1.0f); glVertex3f(-11.0f,  8.3f, z);
    glTexCoord2f(0.0f, 0.0f); glVertex3f(-11.0f, -8.3f, z);
    glTexCoord2f(1.0f, 0.0f); glVertex3f( 11.0f, -8.3f, z);
    glEnd();                     // Done drawing texture

    glFlush();                   // Flush The GL Rendering Pipeline

    if( true == recording_ )
    {
        // Log-polar warp about the image center before writing to disk.
        cvLogPolar( camera_image, logpolarframe_,
                    cvPoint2D32f(camera_image->width/2, camera_image->height/2),
                    40, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS);
        cvWriteFrame( writer_, logpolarframe_ );
    }
}
Example 11: TransformVector
void SCSM::TransformPoint(int angle, float scale, CvPoint orig_point,
                          CvPoint &transform_point, CvPoint2D32f &transform_vector) {
    double origvec_x = static_cast<double>(orig_point.x - fixedCenter.x);
    double origvec_y = static_cast<double>(orig_point.y - fixedCenter.y);
    TransformVector(angle, scale, cvPoint2D32f(origvec_x, origvec_y),
                    transform_point, transform_vector);
}
Example 12: CalcFourierDescriptorCoeff
void CalcFourierDescriptorCoeff(CvSeq* seq_pts, int n_fourier, CvSeq* seq_fourier)
{
    int count = seq_pts->total;
    double *coeff_cos, *coeff_sin;
    coeff_cos = (double*)malloc(count*sizeof(double));
    coeff_sin = (double*)malloc(count*sizeof(double));

    // Precompute one period of the DFT basis: cos/sin of 2*pi*i/count.
    int i;
    for(i = 0; i < count; i++)
    {
        coeff_sin[i] = sin(2*i*CV_PI/count);
        coeff_cos[i] = cos(2*i*CV_PI/count);
    }

    cvClearSeq(seq_fourier);
    for(int u = 0; u < n_fourier; u++)
    {
        CvPoint2D32f point_coeff = cvPoint2D32f(0, 0);
        // Accumulate the u-th coefficient, sampling every 4th contour point.
        for(i = 0; i < count; i += 4)
        {
            CvPoint* pt = (CvPoint*)cvGetSeqElem(seq_pts, i);
            point_coeff.x += (float)(pt->x*coeff_cos[(i*u)%count] + pt->y*coeff_sin[(i*u)%count]);
            point_coeff.y += (float)(pt->y*coeff_cos[(i*u)%count] - pt->x*coeff_sin[(i*u)%count]);
        }
        //point_coeff.x /= count;
        //point_coeff.y /= count;
        cvSeqPush(seq_fourier, &point_coeff);
    }
    free(coeff_cos);
    free(coeff_sin);
}
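A hedged usage sketch for the function above: the file name, the harmonic count, and the output-sequence setup are assumptions, not part of the original snippet. cvFindContours with CV_CHAIN_APPROX_NONE keeps every boundary point, which suits the every-4th-point sampling inside the function:

#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/highgui/highgui_c.h>
#include <stdio.h>

int main()
{
    IplImage* mask = cvLoadImage("mask.png", CV_LOAD_IMAGE_GRAYSCALE);  // placeholder binary mask
    if (!mask) return -1;

    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* contour = 0;
    cvFindContours(mask, storage, &contour, sizeof(CvContour),
                   CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    if (contour) {
        // Sequence of CvPoint2D32f coefficients, one per harmonic.
        CvSeq* fourier = cvCreateSeq(CV_32FC2, sizeof(CvSeq),
                                     sizeof(CvPoint2D32f), storage);
        CalcFourierDescriptorCoeff(contour, 16, fourier);   // 16 harmonics, arbitrary
        printf("%d coefficients computed\n", fourier->total);
    }

    cvReleaseMemStorage(&storage);
    cvReleaseImage(&mask);
    return 0;
}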
Example 13: assert
void moCalibrationModule::triangulate() {
    // We first triangulate all the surfacePoints.
    // Afterwards, in transform mode, when a new touch occurs we can
    // simply look up the triangle in which the touch was performed
    // and get the barycentric parameters of the touch in that triangle.
    // We then use these to compute the on-screen coordinate of the touch.
    moPointList screenPoints = this->property("screenPoints").asPointList();
    moPointList surfacePoints = this->property("surfacePoints").asPointList();
    assert(screenPoints.size() == surfacePoints.size());

    this->delaunayToScreen.clear();
    this->subdiv = cvCreateSubdivDelaunay2D(this->rect, this->storage);

    // Add all the surface points we collected to the subdivision and use
    // the delaunayToScreen map to associate each with its corresponding
    // screen point.
    moPointList::iterator it, its;
    for(it = surfacePoints.begin(), its = screenPoints.begin();
        it != surfacePoints.end(); it++, its++) {
        CvPoint2D32f fp = cvPoint2D32f(it->x, it->y);
        CvSubdiv2DPoint* delaunayPoint = cvSubdivDelaunay2DInsert(subdiv, fp);
        this->delaunayToScreen[delaunayPoint] = (*its);
    }
    this->retriangulate = false;
    this->notifyGui();
}
Example 14: rb_center
/*
 * Return center point of rectangle.
 */
VALUE
rb_center(VALUE self)
{
    CvRect* rect = CVRECT(self);
    return cCvPoint2D32f::new_object(cvPoint2D32f((float)rect->x + (float)rect->width / 2.0,
                                                  (float)rect->y + (float)rect->height / 2.0));
}
Example 15: main
int main(int argc, const char* argv[]) {
    CvCapture* capture = cvCreateFileCapture( argv[1] );
    if (!capture) return -1;

    IplImage* bgr_frame = cvQueryFrame( capture );
    double fps = cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
    CvSize size = cvSize(
        (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH),
        (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT)
    );
    CvVideoWriter* writer = cvCreateVideoWriter( argv[2], CV_FOURCC('M','J','P','G'), fps, size );
    IplImage* logpolar_frame = cvCreateImage( size, IPL_DEPTH_8U, 3 );

    // Remap every frame to log-polar coordinates about the frame center
    // (M = 40 controls the radial scale), then write it to the output video.
    while ( (bgr_frame = cvQueryFrame(capture)) != NULL ) {
        cvLogPolar( bgr_frame, logpolar_frame,
                    cvPoint2D32f(bgr_frame->width/2, bgr_frame->height/2),
                    40,
                    CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS );
        cvWriteFrame( writer, logpolar_frame );
    }
    cvReleaseVideoWriter( &writer );
    cvReleaseImage( &logpolar_frame );
    cvReleaseCapture( &capture );
    return 0;
}
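Built against OpenCV 2.x, this runs as, for example, ./logpolar input.avi output.avi. One caveat worth knowing: cvGetCaptureProperty can return 0 for CV_CAP_PROP_FPS with some containers, which makes cvCreateVideoWriter fail, so a fallback such as "if (fps <= 0) fps = 25;" is a sensible guard. The log-polar mapping is also invertible: adding CV_WARP_INVERSE_MAP to the cvLogPolar flags applies the reverse transform.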