本文整理汇总了C++中cvGetReal2D函数的典型用法代码示例。如果您正苦于以下问题:C++ cvGetReal2D函数的具体用法?C++ cvGetReal2D怎么用?C++ cvGetReal2D使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvGetReal2D函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: memset
// Histogram Contrast Stretching: remap srcImage intensities into dstImage
// using the normalised cumulative histogram as a lookup table.
void Filterling ::contrastStretching (IplImage *srcImage, IplImage *dstImage) {
    // Histogram tables for an 8-bit image. Fixed-size stack arrays replace
    // the original new[]/delete[] pair, so nothing can leak on early exit.
    unsigned int histogramArray[256] = {0} ;
    unsigned int sumHistogramArray[256] = {0} ;
    // Compute the intensity histogram of the source image.
    // (int loop indices: IplImage::width/height are signed; the original
    // compared size_t against int.)
    for (int i = 0 ; i < srcImage ->height ; i++)
        for (int j = 0 ; j < srcImage ->width ; j++)
            histogramArray[(unsigned int)cvGetReal2D (srcImage, i, j)]++ ;
    // Compute the normalised cumulative histogram.
    // NOTE(review): SCALE_FACTOR maps the CDF to [0,128] rather than the
    // full [0,255] range — preserved as-is; confirm the half range is
    // intentional.
    unsigned int sum = 0 ;
    const float SCALE_FACTOR = 128.0f / (float)(srcImage ->height * srcImage ->width) ;
    for (int i = 0 ; i < 256 ; i++) {
        sum += histogramArray[i] ;
        sumHistogramArray[i] = (unsigned int)((sum * SCALE_FACTOR) + 0.5) ;
    }
    // Transform the image using sumHistogramArray as a LUT (pixel indexing).
    for (int i = 0 ; i < srcImage ->height ; i++)
        for (int j = 0 ; j < srcImage ->width ; j++)
            cvSetReal2D (dstImage, i, j, (double) sumHistogramArray[(unsigned int)cvGetReal2D (srcImage, i, j)]) ;
}
示例2: cvGetReal2D
//--------------------------------------------------------------
// Feed optical-flow vectors into the fluid simulation. Only vectors whose
// squared magnitude exceeds flowThreshold are injected, with positions
// normalised to [0,1] in both axes.
void fluid001::opticalFlowToFluid() {
    const float flowThreshold = 100;          // squared-magnitude cutoff
    const float opticalFlowFluidMult = 0.001; // scales raw flow into fluid velocity
    // Removed unused locals from the original: iw, iy, particleEmitCounter,
    // multX and multY were computed but never read.
    for ( int y = 0; y < flow.captureHeight; y += flow.captureRowsStep ){
        for ( int x = 0; x < flow.captureWidth; x += flow.captureColsStep ){
            float dx = cvGetReal2D( flow.vel_x, y, x );
            float dy = cvGetReal2D( flow.vel_y, y, x );
            if(dx*dx+dy*dy > flowThreshold) {
                addToFluid((float)x/flow.captureWidth, (float)y/flow.captureHeight,
                           dx*opticalFlowFluidMult, dy*opticalFlowFluidMult);
            }
        }
    }
}
示例3: cvCreateMat
/**
 * Test the Image: resize to the network input size, run the classifier and
 * return the index of the output unit with the smallest response.
 * NOTE(review): releases the caller's Image — confirm callers expect to
 * lose ownership.
 */
float NN::test(IplImage* Image){
    CvMat* Sample = cvCreateMat(1,IMGSIZE,CV_8U);
    IplImage *resized = cvCreateImage(cvSize(IMGWIDTH,IMGHEIGHT),Image->depth,1);
    cvResize(Image,resized,0);
    cvReleaseImage(&Image);
    // BUGFIX: the original looped over Image->width/height AFTER releasing
    // Image (use-after-free) and passed (col,row) to cvGet2D, which takes
    // (row,col). Iterate over the resized image in row-major order instead.
    for (int i=0; i< resized->height; i++)
        for (int j=0; j< resized->width; j++)
            cvSet2D(Sample,0,i*resized->width + j,cvGet2D(resized,i,j));
    cvReleaseImage(&resized);
    CvMat *output = cvCreateMat(1,Length,CV_32F);
    NNModel.predict(Sample,output);
    // BUGFIX: the original compared an index against output VALUES, so the
    // "minimum" it returned was meaningless. Track the minimal value and
    // its index separately.
    int minIdx = 0;
    double minVal = cvGetReal2D(output,0,0);
    for (int i=0; i<Length; i++)
    {
        double v = cvGetReal2D(output,0,i);
        cout << v << " ";
        if (v < minVal)
        {
            minVal = v;
            minIdx = i;
        }
    }
    // BUGFIX: Sample and output were leaked by the original.
    cvReleaseMat(&Sample);
    cvReleaseMat(&output);
    return minIdx;
}
示例4:
// Convert an OpenCV intrinsic matrix (KK) into an OpenGL projection matrix.
// opencvKK : 3x3 camera intrinsics (fx, fy, cx, cy).
// width/height : image size in pixels; near/far : clip plane distances.
// openglKK : output, 16 floats, column-major.
void CamaraLucida::convertKKopencv2opengl(CvMat* opencvKK, float width, float height, float near, float far, float* openglKK)
{
    float fx = (float)cvGetReal2D( opencvKK, 0, 0 );
    float fy = (float)cvGetReal2D( opencvKK, 1, 1 );
    float cx = (float)cvGetReal2D( opencvKK, 0, 2 );
    float cy = (float)cvGetReal2D( opencvKK, 1, 2 );
    float A = 2. * fx / width;
    float B = 2. * fy / height;
    float C = 2. * (cx / width) - 1.;
    float D = 2. * (cy / height) - 1.;
    // BUGFIX: the original ignored the near/far PARAMETERS and read
    // calib.near/calib.far instead, so the arguments had no effect.
    float E = - (far + near) / (far - near);
    float F = -2. * far * near / (far - near);
    // col-major
    openglKK[0]= A;  openglKK[4]= 0.; openglKK[8]= C;   openglKK[12]= 0.;
    openglKK[1]= 0.; openglKK[5]= B;  openglKK[9]= D;   openglKK[13]= 0.;
    openglKK[2]= 0.; openglKK[6]= 0.; openglKK[10]= E;  openglKK[14]= F;
    openglKK[3]= 0.; openglKK[7]= 0.; openglKK[11]= -1.; openglKK[15]= 0.;
    // another solution by Kyle McDonald...
    // https://github.com/kylemcdonald/ofxCv/blob/master/libs/ofxCv/src/Calibration.cpp
    // glFrustum(
    //     nearDist * (-cx) / fx, nearDist * (w - cx) / fx,
    //     nearDist * (cy - h) / fy, nearDist * (cy) / fy,
    //     nearDist, farDist);
}
示例5: cvGetReal2D
// Scan every other pixel of the flow field and report the vector with the
// largest squared magnitude. Returns true (writing *pos normalised to [0,1]
// and *vel in pixels/frame) when any non-zero flow was found, else false.
bool ofxCvOpticalFlowLK::getBiggestFlowPoint(ofPoint *pos, ofPoint *vel) {
    float bestSqr = 0;
    float bestX = -1;
    float bestY = -1;
    for (int row = 0; row < captureHeight; row += 2) {
        for (int col = 0; col < captureWidth; col += 2) {
            const float vx = cvGetReal2D(vel_x, row, col);
            const float vy = cvGetReal2D(vel_y, row, col);
            const float sqr = vx * vx + vy * vy;
            if (sqr <= bestSqr) continue;
            bestSqr = sqr;
            bestX = col;
            bestY = row;
            vel->x = vx;
            vel->y = vy;
        }
    }
    if (bestSqr <= 0) {
        return false;
    }
    pos->x = bestX / captureWidth;
    pos->y = bestY / captureHeight;
    return true;
}
示例6: glLineWidth
// Draw the flow field as short line segments, one per sampled grid cell,
// scaled from capture resolution into the (x, y, w, h) rectangle.
void ofxCvOpticalFlowLK::draw(float x,float y,float w, float h) {
    glLineWidth(1);
    ofSetColor(0xffffff);
    // Removed the unused local `speed` and its dead commented-out assignment.
    float multW = w/captureWidth;
    float multH = h/captureHeight;
    glPushMatrix();
    {
        glTranslatef(x, y, 0);
        glScalef(multW, multH, 1);
        for ( int yy = 0; yy < captureHeight; yy+=captureRowsStep ){
            for ( int xx = 0; xx < captureWidth; xx+=captureColsStep ){
                int dx = (int)cvGetReal2D( vel_x, yy, xx );
                int dy = (int)cvGetReal2D( vel_y, yy, xx );
                // Segment length doubled so small flows stay visible.
                ofLine(xx, yy, xx+dx * 2, yy+dy * 2);
            }
        }
    }
    glPopMatrix();
}
示例7: cvGetReal2D
// Composite the profile (lateral) face with the centre face into
// m_PanoramicFace. The seam is the line m_slope*x + m_intercept; pixels on
// the profile side are copied, and pixels inside a band of half-width
// m_lineT around the seam are alpha-blended between the two images.
void Panoramic::ShowStitch(IplImage *profile,IplImage *center)
{
    const double midAlpha = 0.5; // centre-image weight exactly on the seam
    for (int row = 0; row < m_height; row++)
    {
        for (int col = 0; col < m_width; col++)
        {
            const double signedDist = m_slope*col + m_intercept - row;
            const double profileVal = cvGetReal2D(profile, row, col);
            if (signedDist <= -m_lineT)
            {
                // Well inside the profile side: copy the profile pixel.
                cvSetReal2D(m_PanoramicFace, row, col, profileVal);
            }
            else if (signedDist < m_lineT)
            {
                // Transition band: blend centre and profile, clamped to 8-bit.
                const double alpha = LinearBlending(m_lineT, signedDist, midAlpha);
                double blended = alpha*cvGetReal2D(center, row, col)
                               + (1 - alpha)*profileVal;
                if (blended > 255) blended = 255;
                else if (blended < 0) blended = 0;
                cvSetReal2D(m_PanoramicFace, row, col, blended);
            }
        }
    }
}
示例8: COG_edges
/*
 * band_centroid_y
 * Intensity-weighted centre of gravity (row coordinate) of the global
 * `thresh` image over columns [xStart, xEnd). Returns 0 when the band holds
 * no intensity at all, instead of dividing by zero (the original produced
 * NaN for an all-black band).
 */
static float band_centroid_y(int xStart, int xEnd)
{
    float weighted = 0.0, accum = 0.0;
    for(int x = xStart; x < xEnd; x++)
    {
        for(int y = 0; y < thresh->height; y++)
        {
            float intensity = (float)(cvGetReal2D(thresh, y, x));
            weighted += y*intensity;
            accum += intensity;
        }
    }
    return (accum > 0.0f) ? (weighted / accum) : 0.0f;
}

/*
 * COG_edges
 * find intersection of chalkline with either edge, using 40-pixel-wide
 * bands at the left and right image borders.
 */
void COG_edges(float *Y_left, float *Y_right)
{
    *Y_left = band_centroid_y(0, 40);
    *Y_right = band_centroid_y(thresh->width-40, thresh->width);
}
示例9: getVelAtNorm
//--------------------------------------------------------------
// Look up the optical-flow velocity at a normalised [0,1] position.
// The pixel indices are clamped so the lookup never leaves the flow field.
void openniTracking::getVelAtNorm(float x, float y, float *u, float *v) {
    int col = (int)(x * _width);
    int row = (int)(y * _height);
    if (col < 0) col = 0;
    if (col > _width - 1) col = _width - 1;
    if (row < 0) row = 0;
    if (row > _height - 1) row = _height - 1;
    *u = cvGetReal2D( opticalFlow.getVelX(), row, col );
    *v = cvGetReal2D( opticalFlow.getVelY(), row, col );
}
示例10: getVelAtNorm
// Look up the optical-flow velocity at a normalised [0,1] position.
// The pixel indices are clamped so the lookup never leaves the flow field.
void MotionTracker::getVelAtNorm(float x, float y, float *u, float *v) {
    int col = (int)(x * camWidth);
    int row = (int)(y * camHeight);
    if (col < 0) col = 0;
    if (col > camWidth - 1) col = camWidth - 1;
    if (row < 0) row = 0;
    if (row > camHeight - 1) row = camHeight - 1;
    *u = cvGetReal2D( opticalFlow.vel_x, row, col );
    *v = cvGetReal2D( opticalFlow.vel_y, row, col );
}
示例11: cvGetReal2D
// Flow vector at pixel (x, y); out-of-range queries yield a zero point.
ofPoint ofxCvOpticalFlowLK::flowAtPoint(int x, int y){
    const bool inside = (x >= 0) && (x < captureWidth)
                     && (y >= 0) && (y < captureHeight);
    if(!inside){
        return 0.0f; // implicitly converted to ofPoint(0, 0)
    }
    return ofPoint(cvGetReal2D( vel_x, y, x ),
                   cvGetReal2D( vel_y, y, x ));
}
示例12: getBlockVel
//Get the velocity of each block given the x and y of that block.
//Returns a zero point for any out-of-range block index.
ofPoint ofxOpticalFlowBM :: getBlockVel( int x, int y ) {
    ofPoint p;
    // BUGFIX: the original only bounded the upper side, so a negative x or y
    // reached cvGetReal2D with a negative index (out-of-range read).
    if( x >= 0 && y >= 0 && x < flowSize.width && y < flowSize.height ) {
        p.x = cvGetReal2D(opFlowVelX, y, x); //NOTE: y then x ... annoying
        p.y = cvGetReal2D(opFlowVelY, y, x); //NOTE: y then x ... annoying
    }
    return p;
}
示例13: printf
// Debug-print matrix M to stdout, one traversal row per line. When colmajor
// is true the matrix rows run along output lines; otherwise the matrix
// columns do. No-op unless verbose logging is enabled.
void CamaraLucida::printM(CvMat* M, bool colmajor)
{
    if (ofGetLogLevel() != OF_LOG_VERBOSE)
    {
        return;
    }
    // The original duplicated the whole element-formatting switch for the
    // row-major and column-major traversals; the two differ only in which
    // index runs in the inner loop, so fold them into one pair of loops.
    const int outer = colmajor ? M->rows : M->cols;
    const int inner = colmajor ? M->cols : M->rows;
    const int depth = CV_MAT_DEPTH(M->type);
    for (int a = 0; a < outer; a++)
    {
        printf("\n");
        for (int b = 0; b < inner; b++)
        {
            const int row = colmajor ? a : b;
            const int col = colmajor ? b : a;
            switch (depth)
            {
                case CV_32F:
                case CV_64F:
                    printf("%9.3f ", (float)cvGetReal2D( M, row, col ));
                    break;
                case CV_8U:
                case CV_16U:
                    printf("%6d",(int)cvGetReal2D( M, row, col ));
                    break;
                default:
                    // Unsupported depths print blank lines, as before.
                    break;
            }
        }
    }
    printf("\n");
}
示例14: ofRectangle
// Average the optical-flow velocity components over a region of interest.
//   u, v   : outputs; receive the scaled mean x- and y-flow over the region.
//   bounds : region in CAMERA pixel coordinates, or NULL for the whole field.
// NOTE(review): when bounds is non-NULL this function rewrites the caller's
// rectangle in place (rescaled to optical-flow coordinates and clipped) —
// confirm callers expect that side effect.
void MotionTracker::getVelAverageComponents(float *u, float *v, ofRectangle* bounds){
    bool cleanBounds = false;
    if(bounds == NULL){
        // No region supplied: build a temporary rectangle covering the whole
        // flow field; cleanBounds remembers to delete it before returning.
        bounds = new ofRectangle(0, 0, optFlowWidth, optFlowHeight);
        cleanBounds = true;
    }
    // normalize the bounds to optical flow system
    bounds->x = (float)bounds->x/(float)camWidth*(float)optFlowWidth; // normalize co-ords from camera size to optical flow size
    bounds->y = (float)bounds->y/(float)camHeight*(float)optFlowHeight;
    bounds->width = (float)bounds->width/(float)camWidth*(float)optFlowWidth;
    bounds->height = (float)bounds->height/(float)camHeight*(float)optFlowHeight;
    // fix out of bounds: clip the rectangle so the loops below never index
    // outside the flow matrices.
    if(bounds->x < 0){
        bounds->width += bounds->x;
        bounds->x = 0;
    }
    else if(bounds->x + bounds->width > optFlowWidth){
        bounds->width -= ((bounds->x + bounds->width) - optFlowWidth);
    }
    if(bounds->y < 0){
        bounds->height += bounds->y;
        bounds->y = 0;
    }
    else if(bounds->y + bounds->height > optFlowHeight){
        bounds->height -= ((bounds->y + bounds->height) - optFlowHeight);
    }
    /** cout << "--bounds:norm--" << endl;
    cout << "x:" << bounds->x << endl;
    cout << "y:" << bounds->y << endl;
    cout << "w:" << bounds->width << endl;
    cout << "h:" << bounds->height << endl;
    cout << "--------------------" << endl;*/
    *u = 0.0;
    *v = 0.0;
    // Sum the flow components over every cell inside the clipped region.
    for(int i=bounds->x; i < bounds->x+(int)bounds->width; i++){
        for(int j=bounds->y; j < bounds->y+(int)bounds->height; j++){
            *u += cvGetReal2D( opticalFlow.vel_x, j, i);
            *v += cvGetReal2D( opticalFlow.vel_y, j, i);
            // cout << i << "," << j << "::" << *u << "," << *v << endl;
        }
    }
    // NOTE(review): each sum is divided by 2*height (resp. 2*width), not by
    // the cell count — per the original comments this is intentional
    // ("u / (width*height) * width/2.0"); confirm downstream expects it.
    *u /= 2.0 * (float)bounds->height; // effectively: u / (width*height) * width/2.0
    *v /= 2.0 * (float)bounds->width; // effectively: v / (width*height) * height/2.0
    if(cleanBounds)
        delete bounds;
}
示例15: dance_measurement
// Measurement model for the dance tracker: the observation z_k is the first
// two components of the state x_k, optionally corrupted by additive
// measurement noise n_k.
static void dance_measurement(const CvMat* x_k,
                              const CvMat* n_k,
                              CvMat* z_k)
{
    // Copy state components 0 and 1 straight into the measurement vector.
    for (int row = 0; row < 2; ++row)
    {
        cvSetReal2D(z_k, row, 0, cvGetReal2D(x_k, row, 0));
    }
    // Skip the noise step entirely when n_k is null.
    if (n_k)
    {
        cvAdd(z_k, n_k, z_k);
    }
}