本文整理汇总了C++中cvMinMaxLoc函数的典型用法代码示例。如果您正苦于以下问题:C++ cvMinMaxLoc函数的具体用法?C++ cvMinMaxLoc怎么用?C++ cvMinMaxLoc使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvMinMaxLoc函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: get_hand_interval_2
void get_hand_interval_2 (IplImage *body, int *interval)
{
    /* Estimate an intensity interval for the hand region in `body`:
     *   interval[0] = global minimum intensity of the image,
     *   interval[1] = the smaller of the two k-means cluster centers
     *                 computed over the non-zero pixels.
     * Assumes `body` contains at least one non-zero pixel (count > 0) --
     * cvCreateMat would fail on an all-zero image; TODO confirm callers
     * guarantee this.
     */
    CvMat *data, *labels, *means;
    int count;
#define CLUSTERS 2
    count = cvCountNonZero(body);              /* number of foreground pixels */
    data = cvCreateMat(count, 1, CV_32FC1);
    labels = cvCreateMat(count, 1, CV_32SC1);
    means = cvCreateMat(CLUSTERS, 1, CV_32FC1);
    fill_mat(body, data);                      /* pack non-zero pixels into `data` */
    cvKMeans2(data, CLUSTERS, labels,
              cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 10.0),
              1, 0, 0, means, 0);
    double tmp;
    /* Lower bound: darkest pixel of the whole image. */
    cvMinMaxLoc(body, &tmp, NULL, NULL, NULL, NULL);
    interval[0] = tmp;
    /* Upper bound: the smaller cluster center (min over `means`). */
    cvMinMaxLoc(means, &tmp, NULL, NULL, NULL, NULL);
    interval[1] = tmp;
    cvReleaseMat(&data);
    cvReleaseMat(&labels);
    cvReleaseMat(&means);   /* BUG FIX: `means` was leaked in the original */
}
示例2: asef_locate_eyes
// Locate both eyes inside asef->face_rect of asef->input_image using ASEF
// correlation filters applied in the frequency domain. Results are written
// to asef->left_eye / asef->right_eye in full-input-image coordinates.
void asef_locate_eyes(AsefEyeLocator *asef){
// Point the face_image header at the detected face rectangle.
// NOTE(review): cvGetSubRect below also fills this header, so these four
// manual assignments look redundant -- confirm before removing.
asef->face_image.cols = asef->face_rect.width;
asef->face_image.rows = asef->face_rect.height;
asef->face_image.type = CV_8UC1;
asef->face_image.step = asef->face_rect.width;
cvGetSubRect(asef->input_image, &asef->face_image, asef->face_rect);
// Scale factors between the fixed filter tile size and the detected face size.
double xscale = ((double)asef->scaled_face_image_8uc1->cols)/((double)asef->face_image.cols);
double yscale = ((double)asef->scaled_face_image_8uc1->rows)/((double)asef->face_image.rows);
// Resize the face to the filter size, then apply the LUT while converting
// to 32-bit float (intensity normalization step).
cvResize(&asef->face_image, asef->scaled_face_image_8uc1, CV_INTER_LINEAR);
cvLUT(asef->scaled_face_image_8uc1, asef->scaled_face_image_32fc1, asef->lut);
// Correlation via the frequency domain: DFT(face) * conj(DFT(filter)),
// then inverse DFT yields the spatial correlation map for each eye filter.
cvDFT(asef->scaled_face_image_32fc1, asef->scaled_face_image_32fc1, CV_DXT_FORWARD, 0);
cvMulSpectrums(asef->scaled_face_image_32fc1, asef->lfilter_dft, asef->lcorr, CV_DXT_MUL_CONJ);
cvMulSpectrums(asef->scaled_face_image_32fc1, asef->rfilter_dft, asef->rcorr, CV_DXT_MUL_CONJ);
cvDFT(asef->lcorr, asef->lcorr, CV_DXT_INV_SCALE, 0);
cvDFT(asef->rcorr, asef->rcorr, CV_DXT_INV_SCALE, 0);
// Each eye is at the correlation peak inside its search ROI
// (lroi/rroi are presumably sub-views of lcorr/rcorr -- confirm).
cvMinMaxLoc(asef->lroi, NULL, NULL, NULL, &asef->left_eye, NULL);
cvMinMaxLoc(asef->rroi, NULL, NULL, NULL, &asef->right_eye, NULL);
// Map peak coordinates from ROI space back through the resize scale into
// full-image coordinates.
asef->left_eye.x = (asef->lrect.x + asef->left_eye.x)/xscale + asef->face_rect.x;
asef->left_eye.y = (asef->lrect.y + asef->left_eye.y)/yscale + asef->face_rect.y;
asef->right_eye.x = (asef->rrect.x + asef->right_eye.x)/xscale + asef->face_rect.x;
asef->right_eye.y = (asef->rrect.y + asef->right_eye.y)/yscale + asef->face_rect.y;
}
示例3: asef_locate_eyes
// Locate both eyes inside `face_rect` of `image` using ASEF correlation
// filters applied in the frequency domain. The peak locations are written
// to *leye / *reye in full-image coordinates.
void asef_locate_eyes(AsefEyeLocator *asef, IplImage *image, CvRect face_rect, CvPoint *leye, CvPoint *reye){
// Point the face_img header at the face rectangle.
// NOTE(review): cvGetSubRect below also fills this header, so these four
// manual assignments look redundant -- confirm before removing.
asef->face_img.cols = face_rect.width;
asef->face_img.rows = face_rect.height;
asef->face_img.type = CV_8UC1;
asef->face_img.step = face_rect.width;
cvGetSubRect(image, &asef->face_img, face_rect);
// Scale factors between the fixed filter tile size and the face size.
double xscale = ((double)asef->image_tile->cols)/((double)asef->face_img.cols);
double yscale = ((double)asef->image_tile->rows)/((double)asef->face_img.rows);
// Resize to the filter size, then LUT-normalize while converting to float.
cvResize(&asef->face_img, asef->image_tile, CV_INTER_LINEAR);
cvLUT(asef->image_tile, asef->image, asef->lut);
// Frequency-domain correlation with the left/right eye filters:
// DFT(face) * conj(DFT(filter)), then inverse DFT gives the correlation map.
cvDFT(asef->image, asef->image, CV_DXT_FORWARD, 0);
cvMulSpectrums(asef->image, asef->lfilter_dft, asef->lcorr, CV_DXT_MUL_CONJ);
cvMulSpectrums(asef->image, asef->rfilter_dft, asef->rcorr, CV_DXT_MUL_CONJ);
cvDFT(asef->lcorr, asef->lcorr, CV_DXT_INV_SCALE, 0);
cvDFT(asef->rcorr, asef->rcorr, CV_DXT_INV_SCALE, 0);
// Each eye is at the correlation peak inside its search ROI.
cvMinMaxLoc(asef->lroi, NULL, NULL, NULL, leye, NULL);
cvMinMaxLoc(asef->rroi, NULL, NULL, NULL, reye, NULL);
// Map ROI-space peaks back through the resize scale into image coordinates.
leye->x = (asef->lrect.x + leye->x)/xscale + face_rect.x;
leye->y = (asef->lrect.y + leye->y)/yscale + face_rect.y;
reye->x = (asef->rrect.x + reye->x)/xscale + face_rect.x;
reye->y = (asef->rrect.y + reye->y)/yscale + face_rect.y;
}
示例4: cvGetImageROI
void CvOneWayDescriptor::EstimatePose(IplImage* patch, int& pose_idx, float& distance) const
{
    // Find the stored pose sample closest (by L2 norm) to the
    // brightness-normalized input patch.
    //   pose_idx: index of the best-matching sample, or -1 if none compared.
    //   distance: L2 distance to that sample, or 1e10 if none compared.
    distance = 1e10;
    pose_idx = -1;

    CvRect roi = cvGetImageROI(patch);
    IplImage* patch_32f = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_32F, patch->nChannels);

    // Normalize so the patch sums to 1, making the comparison invariant to
    // overall brightness.
    float sum = cvSum(patch).val[0];
    if (sum == 0)
    {
        // BUG FIX: an all-zero patch used to divide by zero here, filling
        // patch_32f with inf/NaN and corrupting every distance. Report
        // "no match" (the defaults set above) instead.
        cvReleaseImage(&patch_32f);
        return;
    }
    cvConvertScale(patch, patch_32f, 1/sum);

    for(int i = 0; i < m_pose_count; i++)
    {
        // Only samples with the same dimensions as the patch are comparable.
        if(m_samples[i]->width != patch_32f->width || m_samples[i]->height != patch_32f->height)
        {
            continue;
        }

        float dist = cvNorm(m_samples[i], patch_32f);
        if(dist < distance)
        {
            distance = dist;
            pose_idx = i;
        }
#if 0
        // Debug visualization: show the candidate sample and the patch side
        // by side, each rescaled to 8 bits by its own maximum.
        IplImage* img1 = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_8U, 1);
        IplImage* img2 = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_8U, 1);
        double maxval;
        cvMinMaxLoc(m_samples[i], 0, &maxval);
        cvConvertScale(m_samples[i], img1, 255.0/maxval);
        cvMinMaxLoc(patch_32f, 0, &maxval);
        cvConvertScale(patch_32f, img2, 255.0/maxval);
        cvNamedWindow("1", 1);
        cvShowImage("1", img1);
        cvNamedWindow("2", 1);
        cvShowImage("2", img2);
        printf("Distance = %f\n", dist);
        cvWaitKey(0);
#endif
    }
    cvReleaseImage(&patch_32f);
}
示例5: convertFloatImageToUcharImage
// Get an 8-bit equivalent of the 32-bit Float image.
// Returns a new image, so remember to call 'cvReleaseImage()' on the result.
// Returns 0 (NULL) for a null or empty source image.
IplImage* convertFloatImageToUcharImage(const IplImage *srcImg)
{
    if (!srcImg || srcImg->width <= 0 || srcImg->height <= 0)
        return 0;

    // Find the value range so the float data can be spread across 0..255.
    double lo, hi;
    cvMinMaxLoc(srcImg, &lo, &hi);
    //cout << "FloatImage:(minV=" << lo << ", maxV=" << hi << ")." << endl;

    // The DFT can produce NaN or extreme values; clamp them to sane bounds.
    if (cvIsNaN(lo) || lo < -1e30)
        lo = -1e30;
    if (cvIsNaN(hi) || hi > 1e30)
        hi = 1e30;
    if (hi - lo == 0.0f)
        hi = lo + 0.001;   // avoid a divide-by-zero in the scale factor

    // Linearly map [lo, hi] onto [0, 255] while converting to 8 bits.
    IplImage *dst = cvCreateImage(cvSize(srcImg->width, srcImg->height), 8, 1);
    cvConvertScale(srcImg, dst, 255.0 / (hi - lo), -lo * 255.0 / (hi - lo));
    return dst;
}
示例6: gst_motiondetect_apply
/* Compare the current frame against the reference frame and report whether
 * any difference above the noise threshold survives inside the mask.
 * NOTE: cvReferenceImage is overwritten in place with the thresholded
 * absolute-difference image (the alias below is deliberate). */
static gboolean gst_motiondetect_apply (
    IplImage * cvReferenceImage, const IplImage * cvCurrentImage,
    const IplImage * cvMaskImage, float noiseThreshold)
{
  IplImage *diff = cvReferenceImage;   /* reuse the reference buffer */
  int threshold = (int)((1 - noiseThreshold) * 255);
  double maxVal = -1.0;

  /* Binary map of pixels that changed by more than the noise threshold. */
  cvAbsDiff (diff, cvCurrentImage, diff);
  cvThreshold (diff, diff, threshold, 255, CV_THRESH_BINARY);

  /* Erode once with a 3x3 ellipse to discard isolated noise pixels. */
  IplConvKernel *kernel = cvCreateStructuringElementEx (3, 3, 1, 1,
      CV_SHAPE_ELLIPSE, NULL);
  cvErode (diff, diff, kernel, 1);
  cvReleaseStructuringElement (&kernel);

  /* Any non-zero pixel left inside the mask means motion was detected. */
  cvMinMaxLoc (diff, NULL, &maxVal, NULL, NULL, cvMaskImage);
  return (maxVal > 0) ? TRUE : FALSE;
}
示例7: main
int main( int argc, char** argv ) {
    // Template-matching demo (OpenCV book, Example 7-5): run all six
    // cvMatchTemplate methods (CV_TM_SQDIFF .. CV_TM_CCOEFF_NORMED) on an
    // (image, template) pair, print each result map's min/max locations,
    // and display all result maps.
    // Usage: matchTemplate <image> <template>
    IplImage *src, *templ, *ftmp[6]; //ftmp will hold results
    CvPoint minloc[6], maxloc[6];
    double minval[6], maxval[6];
    int i;
    if( argc == 3){
        //Read in the source image to be searched:
        if((src=cvLoadImage(argv[1], 1))== 0) {
            // BUG FIX: the original printed argv[i] with `i` uninitialized
            // (undefined behavior); the source path is argv[1].
            printf("Error on reading src image %s\n",argv[1]);
            return(-1);
        }
        //Read in the template to be used for matching:
        if((templ=cvLoadImage(argv[2], 1))== 0) {
            printf("Error on reading template %s\n",argv[2]);
            return(-1);
        }
        //ALLOCATE OUTPUT IMAGES: each result map is (W-w+1) x (H-h+1).
        int iwidth = src->width - templ->width + 1;
        int iheight = src->height - templ->height + 1;
        for(i=0; i<6; ++i){
            ftmp[i] = cvCreateImage(
                cvSize(iwidth,iheight),32,1);
        }
        //DO THE MATCHING OF THE TEMPLATE WITH THE IMAGE:
        // The method index i doubles as the cvMatchTemplate method id.
        for(i=0; i<6; ++i){
            cvMatchTemplate( src, templ, ftmp[i], i);
            //cvNormalize(ftmp[i],ftmp[i],1,0,CV_MINMAX);
            cvMinMaxLoc(ftmp[i], &minval[i], &maxval[i], &minloc[i], &maxloc[i], 0);
            std::cerr /*<< i << ":" << "minval: " << minval[i] \
                << " maxval: " << maxval[i] */ \
                << " minloc: " << minloc[i].x << ", " << minloc[i].y \
                << " maxloc: " << maxloc[i].x << ", " << maxloc[i].y;
            std::cerr << "\n";
        }
        //DISPLAY one window per matching method:
        cvNamedWindow( "Template", 0 );
        cvShowImage( "Template", templ );
        cvNamedWindow( "Image", 0 );
        cvShowImage( "Image", src );
        cvNamedWindow( "SQDIFF", 0 );
        cvShowImage( "SQDIFF", ftmp[0] );
        cvNamedWindow( "SQDIFF_NORMED", 0 );
        cvShowImage( "SQDIFF_NORMED", ftmp[1] );
        cvNamedWindow( "CCORR", 0 );
        cvShowImage( "CCORR", ftmp[2] );
        cvNamedWindow( "CCORR_NORMED", 0 );
        cvShowImage( "CCORR_NORMED", ftmp[3] );
        cvNamedWindow( "CCOEFF", 0 );
        cvShowImage( "CCOEFF", ftmp[4] );
        cvNamedWindow( "CCOEFF_NORMED", 0 );
        cvShowImage( "CCOEFF_NORMED", ftmp[5] );
        //LET USER VIEW RESULTS:
        cvWaitKey(0);
    } else {
        printf("Call should be:"
               "matchTemplate image template \n");
    }
    return 0;   // explicit success exit (was implicit)
}
示例8: LogMinMax
// Write the minimum and maximum element values of `mat` to `os`,
// tab-separated, followed by a newline (with flush via std::endl).
void LogMinMax(CvArr* mat,std::ostream& os)
{
double minVal = 0.0;
double maxVal = 0.0;
cvMinMaxLoc(mat, &minVal, &maxVal, NULL, NULL, NULL);
os << minVal << "\t" << maxVal << std::endl;
}
示例9: ObtenerMaximo
double* ObtenerMaximo(IplImage* Imagen, STFrame* FrameData, CvRect Roi) {
    // Busqueda del "máximo umbral": computes, per pixel, the background-
    // normalized distance |I(p) - u(p)| / sigma(p) and returns a pointer to
    // the largest such value over the foreground mask FrameData->FG.
    //
    // BUG FIXES vs. the original:
    //  - it passed a NULL double* to cvMinMaxLoc, so no value was ever
    //    written and the function always returned NULL;
    //  - it leaked both temporary images (IDif, peso);
    //  - it requested the *minimum*; given the function name and the log
    //    message, the maximum is what was intended -- TODO confirm with
    //    the caller.
    //
    // Returns a pointer to a static double, overwritten on every call
    // (not thread-safe). `Roi` is accepted for interface compatibility but,
    // as in the original, is not used.
    if (SHOW_VALIDATION_DATA == 1)
        printf(" \n\n Busqueda del máximo umbral...");

    // Difference image |I(p)-u(p)| (8u) and the normalized weights (32f).
    IplImage* IDif = cvCreateImage(cvSize(FrameData->BGModel->width,
        FrameData->BGModel->height), IPL_DEPTH_8U, 1);
    IplImage* peso = cvCreateImage(cvSize(FrameData->BGModel->width,
        FrameData->BGModel->height), IPL_DEPTH_32F, 1);
    cvZero(IDif);
    cvZero(peso);

    // |I(p)-u(p)| / sigma(p)
    cvAbsDiff(Imagen, FrameData->BGModel, IDif);
    cvDiv(IDif, FrameData->IDesvf, peso);

    // Largest normalized distance within the foreground mask.
    static double Maximo;
    cvMinMaxLoc(peso, NULL, &Maximo, NULL, NULL, FrameData->FG);

    cvReleaseImage(&IDif);
    cvReleaseImage(&peso);
    return &Maximo;
}
示例10: main
int main(int argc, char** argv){
    // Template-matching demo: locate argv[2] (template) inside argv[1]
    // (source) with CV_TM_SQDIFF and draw a red rectangle at the best match.
    if(argc != 3){
        printf("Error 1: 2 arguments expected, %d given.\n",argc-1);
        return 0;
    }
    IplImage* source = cvLoadImage(argv[1],CV_LOAD_IMAGE_COLOR);
    IplImage* tmpl = cvLoadImage(argv[2],CV_LOAD_IMAGE_COLOR);
    // ROBUSTNESS FIX: the original dereferenced NULL when either image
    // failed to load.
    if(!source || !tmpl){
        printf("Error 2: could not load input images.\n");
        return 0;
    }
    // The result map has one score per template placement.
    int ww = source->width - tmpl->width + 1;
    int hh = source->height - tmpl->height + 1;
    IplImage *result = cvCreateImage(cvSize(ww,hh),IPL_DEPTH_32F, 1);
    cvMatchTemplate(source, tmpl, result, CV_TM_SQDIFF);
    // For CV_TM_SQDIFF the best match is the *minimum* of the result map.
    CvPoint minLoc;
    CvPoint maxLoc;
    double minVal;
    double maxVal;
    cvMinMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, 0);
    cvRectangle(source, minLoc, cvPoint(minLoc.x+tmpl->width,minLoc.y+tmpl->height),cvScalar(0,0,255,1),1,8,0);
    cvNamedWindow("match",CV_WINDOW_AUTOSIZE);
    cvShowImage("match",source);
    cvWaitKey(0);
    cvReleaseImage(&source);
    cvReleaseImage(&tmpl);
    cvReleaseImage(&result);
    cvDestroyWindow("match");
    return 0;   // explicit success exit; also removed the unused `int i`
}
示例11: cvBoundingRect
// Track contour i from the previous image (oimg) into the current image (img)
// by template-matching the contour's bounding patch within a search window
// enlarged by `winsize` on each side. The resulting shift is applied to the
// contour and its feature points. `map` is a caller-owned cached result
// image, reallocated here only when its required size changes.
void MatchTemplatePlugin::ProcessStatic
( int i, ImagePlus *img, ImagePlus *oimg,
int method, CvSize winsize, IplImage* &map){
// Template: bounding rect of the contour in the old image, clipped to bounds.
CvRect orect = cvBoundingRect(oimg->contourArray[i],1);
RestrictRectLoc(orect, cvRect(0,0,img->orig->width,img->orig->height));
cvSetImageROI(oimg->orig, orect);
// Search window: the template rect grown by winsize on every side, then
// clipped to the image.
CvRect rect = cvRect(MAX(0,orect.x-winsize.width), MAX(0,orect.y-winsize.height),orect.width+2*winsize.width, orect.height+2*winsize.height);
rect.width = MIN(rect.width,oimg->orig->width-rect.x);
rect.height = MIN(rect.height,oimg->orig->height-rect.y);
cvSetImageROI(img->orig, rect);
// Result map size = (search - template + 1) per dimension; reuse the cached
// map when possible, otherwise reallocate.
CvSize mapsize = MyPoint(MyPoint(rect)-MyPoint(orect)+wxPoint(1,1)).ToCvSize();
if (map && MyPoint(cvGetSize(map))!=MyPoint(mapsize))
cvReleaseImage(&map);
if( !map )
map = cvCreateImage(mapsize, IPL_DEPTH_32F, 1);
cvMatchTemplate( img->orig, oimg->orig, map, method );
cvResetImageROI(img->orig);
cvResetImageROI(oimg->orig);
CvPoint minloc;
CvPoint maxloc;
double minval, maxval;
cvMinMaxLoc( map, &minval, &maxval, &minloc, &maxloc);
// SQDIFF-family methods score best at the minimum; all others at the maximum.
bool minisbest = (method == CV_TM_SQDIFF || method==CV_TM_SQDIFF_NORMED);
rect.x = rect.x + (minisbest ? minloc.x : maxloc.x);
rect.y = rect.y + (minisbest ? minloc.y : maxloc.y);
// Shift = best match position minus the old position; propagate it to the
// current image's contour and feature points.
CvPoint shift = cvPoint(rect.x - orect.x, rect.y - orect.y);
ShiftContour(oimg->contourArray[i],img->contourArray[i],shift);
ShiftFeatPoints(oimg->feats[i], img->feats[i], cvPointTo32f(shift));
}
示例12: cvAvg
// Compute threshold statistics inside targetRect (working in coordinates
// scaled down by `divisor`): the average pupil intensity -- computed twice
// so glint pixels can be rejected -- and the average/min/max intensity of
// the non-pupil ("white") area. Results are stored in the members pupilAvg,
// whiteAvg, whiteMin, whiteMax, whiteLocMin and whiteLocMax.
void thresholdCalculator::calculateAverages(ofxCvGrayscaleAdvanced & smallCurrentImg, ofxCvGrayscaleAdvanced & maskImg, ofRectangle & targetRect) {
// Map the target rectangle into the downscaled coordinate system.
roi.x = targetRect.x / divisor;
roi.y = targetRect.y / divisor;
maskImg.setROI(roi);
smallCurrentImg.setROI(roi);
// First pass: mean intensity over the pupil mask (glint still included).
CvScalar tempPupilAvg = cvAvg(smallCurrentImg.getCvImage(), maskImg.getCvImage());
// notDiffImg = inverse of the pupil mask, i.e. the white-of-eye area.
cvNot(maskImg.getCvImage(), notDiffImg.getCvImage());
pupilAvg = tempPupilAvg.val[0];
// get average of pupil black iteratively(get average twice) to remove the influence of glint
cvThreshold(smallCurrentImg.getCvImage(), farFromAvg, pupilAvg + 30, 255, CV_THRESH_BINARY); // 30 is the distance from average.
cvSub(maskImg.getCvImage(), farFromAvg, newMask); // make a mask to get rid of those far points.
CvScalar newPupilAvg = cvAvg(smallCurrentImg.getCvImage(), newMask); // get new average value.
// get average, min and max value of white area of an eye.
CvScalar tempWhiteAvg = cvAvg(smallCurrentImg.getCvImage(), notDiffImg.getCvImage());
for (int i = 0; i < 6; i++) notDiffImg.erode(); // this might be very useful to reduce the influence of small noise & glint
cvMinMaxLoc(smallCurrentImg.getCvImage(), &whiteMin, &whiteMax, &whiteLocMin, &whiteLocMax, notDiffImg.getCvImage());
maskImg.resetROI();
smallCurrentImg.resetROI();
pupilAvg = newPupilAvg.val[0]; // value is in the first element of CvScalar
whiteAvg = tempWhiteAvg.val[0];
}
示例13: cvSize
ofPoint matchFinder::getPoint() {
    // Run SQDIFF template matching of `tpl` over `input` and return the
    // location of the best (minimum) match, in `input` coordinates.
    // Side effect: ofcv_result_image is updated with an 8-bit copy of the
    // result map for preview.

    // get the size for our result image
    CvSize result_size = cvSize(input.getWidth() - tpl.getWidth() + 1,
        input.getHeight() - tpl.getHeight() + 1);
    // create the result image for the comparison
    IplImage *result_image = cvCreateImage(result_size, IPL_DEPTH_32F, 1);
    // make the comparison
    cvMatchTemplate(input.getCvImage(), tpl.getCvImage(), result_image, CV_TM_SQDIFF);

    // Copy the float result into an 8-bit image for the ofxCv preview.
    // BUG FIX: the original allocated result_char but never wrote the result
    // into it (the preview showed uninitialized memory) and never released
    // it (a leak on every call).
    IplImage *result_char = cvCreateImage(cvSize(result_image->width, result_image->height), 8, 1);
    cvConvertScale(result_image, result_char); // saturating float->8u copy; rescale here if a normalized preview is wanted
    ofcv_result_image.allocate(result_size.width, result_size.height);
    ofcv_result_image = result_char; // ofxCv copies the pixels, so the IplImage can be freed
    cvReleaseImage(&result_char);

    // get the location of the best match (SQDIFF: lower is better).
    CvPoint min_loc;
    CvPoint max_loc;
    cvMinMaxLoc(result_image, &min_val, &max_val, &min_loc, &max_loc, 0);
    // clean up
    cvReleaseImage(&result_image);
    // return value
    ofPoint p = ofPoint(min_loc.x, min_loc.y);
    return p;
}
示例14: cvGetMat
// Return the accumulator cell with the highest vote count, converted back
// into parameter-space values (one entry per Hough dimension). Returns an
// empty vector when the best cell has fewer than 10 votes.
DOUBLEVECT HoughAccumulator::FindBest()
{
DOUBLEVECT v;
CvMat temp;
// View the N-dimensional accumulator as a 2D matrix so cvMinMaxLoc can
// scan it (last arg 1 = coi / first channel).
CvMat* locMat = cvGetMat(acc, &temp, NULL, 1);
// int rowsize = 4 * ((acc->dims / 4) +
// ((acc->dims % 4 > 0) ? 1 : 0));
double max_val;
CvPoint max_loc;
// Only the location of the maximum is needed; max_val itself is unused.
cvMinMaxLoc(locMat, NULL, &max_val, NULL, &max_loc, NULL);
// Flat offset of the peak within the matrix.
// NOTE(review): this adds a column index to y*step, where step is a byte
// stride -- this is only consistent if the element size is 1 byte (uchar
// accumulator, as the cvPtr2D read below suggests). Confirm.
int indraw = max_loc.x + max_loc.y * locMat->step;
uchar* pValue = cvPtr2D(locMat, max_loc.y, max_loc.x);
// Reject weak peaks: fewer than 10 votes means no reliable detection.
if (*pValue < 10)
return v;
// Unravel the flat offset into one index per accumulator dimension using
// the per-dimension step sizes of the CvMatND.
indices[0] = indraw / acc->dim[0].step;
indices[acc->dims - 1] = indraw % acc->dim[acc->dims - 2].step;
for (int i = 1; i < acc->dims - 1; i ++)
indices[i] = (indraw % acc->dim[i - 1].step) / acc->dim[i].step;
// Map each bin index back to a parameter value: index/precision + range min.
for (int j = 0; j < acc->dims; j++)
{
double d = indices[j] / (float)precision + paramRanges[j].min;
v.push_back(d);
}
return v;
}
示例15: CVImage
void Convert32FTo8U::execute() {
CVImage* cvimg = cvImageIn.getBuffer();
if(!cvimg) { if(debug) std::cerr << getName() << "::ERROR::cvImageIn is NULL!\n"; cvImageOut.setBuffer(NULL); cvImageOut.out(); return; }
if(cvimg->cvMatType != CV_32FC1) { if(debug) std::cerr << getName() << "::ERROR::cvImageIn has incorrect type (must be CV_32FC1)!\n"; cvImageOut.setBuffer(NULL); cvImageOut.out(); return; }
if(!mp_cvimg8u){
//mp_cvimg32f = new CVImage(cvSize(cvimg->width, cvimg->height), CV_32FC1, 0);
mp_cvimg8u = new CVImage(cvSize(cvimg->width, cvimg->height), CV_8UC1, 0);
}
IplImage* img = cvimg->ipl;
double minval, maxval;
cvMinMaxLoc(img, &minval, &maxval, NULL, NULL, NULL);
double scale, shift;
if(maxval == minval) { scale = 255.0; shift = 0.0; }
else {
scale = 255.0 / (maxval - minval);
shift = - minval * scale;
}
cvConvertScale(img, mp_cvimg8u->ipl, scale, shift);
cvImageOut.setBuffer(mp_cvimg8u);
cvImageOut.out();
}