本文整理汇总了C++中cvConvertScale函数的典型用法代码示例。如果您正苦于以下问题:C++ cvConvertScale函数的具体用法?C++ cvConvertScale怎么用?C++ cvConvertScale使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvConvertScale函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: cvWaitKey
// Poll the UI for one key press and translate it into a renderer command.
// 'q' requests shutdown; 's' dumps the current frame (if any) as a 32-bit
// float PFM file at the member path savePath. Any other key is ignored.
string MCRenderer::response(const IplImage* currentImage)
{
    const int pressedKey = cvWaitKey(10);

    if (pressedKey == 'q')
        return "quit";

    if (pressedKey == 's' && currentImage)
    {
        // PFM stores 32-bit floats, so widen the frame before saving.
        IplImage* floatFrame = cvCreateImage(cvSize(currentImage->width, currentImage->height), IPL_DEPTH_32F, 3);
        cvConvertScale(currentImage, floatFrame, 1);
        saveImagePFM(savePath, floatFrame);
        cvReleaseImage(&floatFrame);
    }

    return "";
}
示例2: optimizeDepthMap
// Smooths and fills the raw depth map using the user-segmentation mask.
// Operates in place on the module-level images uImage (8-bit user mask,
// non-zero = user pixel) and dImage (depth; presumably 16-bit given the
// /256 and *256 scaling below — TODO confirm); dSize is the shared size.
// The mask is inverted mid-function and restored before returning.
// Always returns true.
bool optimizeDepthMap()
{
cvErode(uImage,uImage,0,2); //Smoothen the User Map as well
cvDilate(uImage,uImage,0,2);
CvScalar depthMean=cvAvg(dImage,uImage); //Get the average depth value of the user pixels
cvNot(uImage,uImage); //Invert the user pixels to paint the rest of the image with average user depth
//viewImage(dImage);
cvSet(dImage,depthMean,uImage); // fill the non-user region with the user's mean depth
IplImage* tempImage=cvCreateImage(dSize,IPL_DEPTH_8U,1);
cvConvertScale(dImage,tempImage,1.0/256); // compress depth into 8 bits so cvSmooth can run on it
cvSmooth(tempImage,tempImage,CV_GAUSSIAN,7);//Perform Gaussian Smoothing, depth map is optimized.
cvConvert(tempImage,dImage);
cvScale(dImage,dImage,256); // expand the smoothed values back to the original range
cvSet(dImage,cvScalar(0),uImage); // zero the (still inverted) non-user region again
//viewImage(dImage);
//cvSmooth(dImage,dImage,CV_GAUSSIAN,gaussian_m,gaussian_n,gaussian_e);//Perform Gaussian Smoothing, depth map is optimized.
cvNot(uImage,uImage); // restore the mask to its original polarity for the caller
cvReleaseImage(&tempImage);
return true;
}
示例3: cvCreateImage
/* Return a 32-bit float grayscale copy of img with values scaled to [0,1].
 * Accepts a 3-channel BGR image or an already-gray single-channel image.
 * Returns NULL when img is NULL; the caller owns the returned image. */
IplImage *get_gray(const IplImage *img) {
    if (img == NULL) {
        return NULL;
    }

    IplImage *gray32 = cvCreateImage(cvGetSize(img), IPL_DEPTH_32F, 1);

    /* First obtain an 8-bit single-channel version of the input. */
    IplImage *gray8;
    if (img->nChannels != 1) {
        gray8 = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
        cvCvtColor(img, gray8, CV_BGR2GRAY);
    } else {
        gray8 = (IplImage *)cvClone(img);
    }

    /* Widen to float while normalizing 0..255 into 0..1. */
    cvConvertScale(gray8, gray32, 1.0 / 255.0, 0);
    cvReleaseImage(&gray8);
    return gray32;
}
示例4: cvCreateImage
/* Create a camshift tracked object from a region in image.
 * Allocates the HSV/hue/mask/backprojection work images and builds a
 * normalized hue histogram over *region, remembering the region as the
 * previous face location.
 * Returns NULL on allocation failure; otherwise the caller owns the object. */
TrackedObj* FaceBl0r::create_tracked_object (IplImage* image, CvRect* region) {
    TrackedObj* obj;

    //allocate memory for tracked object struct
    if((obj = (TrackedObj *) malloc(sizeof *obj)) == NULL) {
        //BUGFIX: the original fell through on malloc failure and dereferenced
        //the NULL pointer below (update_hue_image, obj->hue, ...)
        return NULL;
    }

    //create-image: size(w,h), bit depth, channels
    obj->hsv  = cvCreateImage(cvGetSize(image), 8, 3);
    obj->mask = cvCreateImage(cvGetSize(image), 8, 1);
    obj->hue  = cvCreateImage(cvGetSize(image), 8, 1);
    obj->prob = cvCreateImage(cvGetSize(image), 8, 1);

    int hist_bins = 30;           //number of histogram bins
    float hist_range[] = {0,180}; //histogram range
    float* range = hist_range;

    obj->hist = cvCreateHist(1,             //number of hist dimensions
                             &hist_bins,    //array of dimension sizes
                             CV_HIST_ARRAY, //representation format
                             &range,        //array of ranges for bins
                             1);            //uniformity flag

    //create a new hue image
    update_hue_image(image, obj);

    float max_val = 0.f;

    //create a histogram representation for the face
    cvSetImageROI(obj->hue, *region);
    cvSetImageROI(obj->mask, *region);
    cvCalcHist(&obj->hue, obj->hist, 0, obj->mask);
    cvGetMinMaxHistValue(obj->hist, 0, &max_val, 0, 0 );
    //scale bins so the tallest maps to 255; guard the empty-histogram case
    cvConvertScale(obj->hist->bins, obj->hist->bins,
                   max_val ? 255.0/max_val : 0, 0);
    cvResetImageROI(obj->hue);
    cvResetImageROI(obj->mask);

    //store the previous face location
    obj->prev_rect = *region;

    return obj;
}
示例5: bgr2hsv
/**
 * @brief Converts a BGR image to HSV on an OpenCL device.
 * @param bgr The original image in BGR byte order
 * @param objFRAME The OpenCL buffer object bound to the frame, used by the kernel
 * @param context The OpenCL device context
 * @param kernelHSV The OpenCL kernel that computes the HSV image
 * @param command_queue The OpenCL device command queue
 * @param work_items Number of work groups to launch (local size is fixed at 128)
 * @return A new 32-bit image; the kernel is documented to produce S and V in [0,1] and H in [0,360]
 */
IplImage* bgr2hsv( IplImage *bgr, cl_mem *objFRAME, cl_context *context, cl_kernel *kernelHSV, cl_command_queue *command_queue, size_t work_items ) {
cl_int ret; // NOTE(review): every cl* status below is assigned but never checked — consider error handling
IplImage *bgr32f;
bgr32f = cvCreateImage( cvGetSize(bgr), IPL_DEPTH_32F, 3 );
cvConvertScale( bgr, bgr32f, 1.0 / 255.0, 0 ); // normalize 0..255 bytes into 0..1 floats
// Non-blocking upload (CL_FALSE); presumably the queue is in-order so the
// kernel observes the written data — TODO confirm queue properties
ret = clEnqueueWriteBuffer(command_queue[0], objFRAME[0], CL_FALSE, 0, bgr32f->imageSize, bgr32f->imageData, 0, NULL, NULL);
// Set the kernel arguments
ret = clSetKernelArg(kernelHSV[0], 0, sizeof(cl_mem), (void *)objFRAME);
ret = clSetKernelArg(kernelHSV[0], 1, sizeof(int), &bgr32f->widthStep);
ret = clSetKernelArg(kernelHSV[0], 2, sizeof(int), &bgr32f->height);
ret = clSetKernelArg(kernelHSV[0], 3, sizeof(int), &bgr32f->width);
size_t local = 128;
size_t global = work_items * local; // total work items = work groups * local size
// Launch the kernel as a 1-D data-parallel job
ret = clEnqueueNDRangeKernel(command_queue[0], kernelHSV[0], 1, NULL, &global, &local, 0, NULL, NULL);
// NOTE(review): returns before the kernel necessarily finishes; the caller
// presumably performs a blocking read or clFinish before using the result — confirm
return bgr32f;
}
示例6: BOCV_Mask_attach
/**
 * Create an 8-bit mask image from a compositor buffer node.
 * @param cbuf source compositor buffer holding float pixel data
 * @return newly allocated 8-bit IplImage, or NULL if cbuf is NULL or empty;
 *         the caller owns the returned image
 */
IplImage* BOCV_Mask_attach(CompBuf* cbuf)
{
	IplImage *mask;
	IplImage *img;

	if(cbuf == NULL)
		return NULL;
	if(cbuf->x <= 0 || cbuf->y <= 0)
		return NULL;

	//Wrap the comp buf pixels in an image header (no pixel copy is made)
	img = cvCreateImageHeader(cvSize(cbuf->x,cbuf->y),IPL_DEPTH_32F,cbuf->type);
	cvSetData(img,cbuf->rect,cbuf->x * cbuf->type * sizeof(float)); // always 4 byte align.
	mask= cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, cbuf->type);
	//Convert to 8 bit unsigned
	cvConvertScale(img, mask,1,0);
	//BUGFIX: release the header (but not the borrowed cbuf pixel data);
	//the original leaked it on every call
	cvReleaseImageHeader(&img);
	return mask;
}
示例7: gst_motiondetect_log_image
/* Save a debug snapshot of image as debugDirectory/<index>_<filename>.
 * 32-bit float images are rescaled to 8-bit first (assumes pixel values in
 * [0,1] — TODO confirm against the producers of these images).
 * No-op when image or debugDirectory is NULL. */
static void
gst_motiondetect_log_image (const IplImage * image,
    const char * debugDirectory, int index, const char * filename)
{
  char *filepath;

  if (!image || !debugDirectory)
    return;

  /* BUGFIX: on failure asprintf leaves filepath undefined — the original
   * went on to pass it to cvSaveImage and free, which is undefined behavior */
  if (asprintf (&filepath, "%s/%05d_%s", debugDirectory, index, filename) < 0)
    return;

  if (image->depth == IPL_DEPTH_32F) {
    /* Scale 0..1 floats up to 0..255 so the file format can hold them */
    IplImage *scaledImageToLog = cvCreateImage (
        cvSize (image->width, image->height), IPL_DEPTH_8U, 1);
    cvConvertScale (image, scaledImageToLog, 255.0, 0);
    cvSaveImage (filepath, scaledImageToLog, NULL);
    cvReleaseImage (&scaledImageToLog);
  } else {
    cvSaveImage (filepath, image, NULL);
  }
  free (filepath);
}
示例8: cvCopy
// Build the tracker's normalized hue histogram from the user-selected
// rectangle of img, and seed the camshift track window with that rectangle.
// `sel` arrives with a bottom-origin y axis (note the Height()-bottom-1
// conversion and the cvFlip below); `selection`, `hsv`, `hue`, `mask`,
// `hist`, `vmin`, `vmax`, `smin` and `track_window` are class members.
void CamShift::CalcHistogram(const ImgBgr& img, const CRect& sel)
{
// Convert the selection from bottom-origin window coords to image coords
selection.x = sel.left;
selection.y = img.Height()-sel.bottom-1;
selection.width = sel.Width();
selection.height = sel.Height();
cvCopy(ImgIplImage(img), image, 0 );
cvCvtColor( image, hsv, CV_BGR2HSV );
cvFlip(hsv,hsv,0); // flip vertically to match the converted selection coords
//cvSaveImage("hsv.bmp", hsv);
//cvSaveImage("img.bmp", image);
// Mask out pixels with too little saturation or out-of-range value
int _vmin = vmin, _vmax = vmax;
cvInRangeS( hsv, cvScalar(0,smin,MIN(_vmin,_vmax),0),
cvScalar(180,256,MAX(_vmin,_vmax),0), mask );
cvSplit( hsv, hue, 0, 0, 0 ); // isolate the hue plane
float max_val = 0.f;
// Accumulate the hue histogram over the selection only
cvSetImageROI(hue, selection );
cvSetImageROI( mask, selection );
cvCalcHist( &hue, hist, 0, mask );
// Normalize so the tallest bin becomes 255 (guard the empty histogram)
cvGetMinMaxHistValue( hist, 0, &max_val, 0, 0 );
cvConvertScale( hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0 );
cvResetImageROI( hue );
cvResetImageROI( mask );
track_window = selection;
// Disabled debug rendering of the histogram:
// cvZero( histimg );
// int bin_w = histimg->width / hdims;
// for(int i = 0; i < hdims; i++ )
// {
// int a = cvGetReal1D(hist->bins,i);
// int val = cvRound( cvGetReal1D(hist->bins,i)*histimg->height/255 );
// CvScalar color = hsv2rgb(i*180.f/hdims);
// cvRectangle( histimg, cvPoint(i*bin_w,histimg->height),
// cvPoint((i+1)*bin_w,histimg->height - val),
// color, -1, 8, 0 );
// }
// cvNamedWindow( "Histogram", 1 );
//
// cvShowImage( "Histogram", histimg );
}
示例9: startTracking
//////////////////////////////////
// startTracking()
//
// Initialize the camshift tracker from a detected face rectangle: build the
// normalized hue histogram inside *pFaceRect and remember the rectangle as
// the previous face location.
void startTracking(IplImage * pImg, CvRect * pFaceRect)
{
    float histPeak = 0.f;

    // Lazily allocate the tracker's internal images and histogram
    if (!pHist)
        createTracker(pImg);

    // Refresh the hue plane from the current frame
    updateHueImage(pImg);

    // Accumulate the hue histogram over the face region only
    cvSetImageROI(pMask, *pFaceRect);
    cvSetImageROI(pHueImg, *pFaceRect);
    cvCalcHist(&pHueImg, pHist, 0, pMask);

    // Normalize so the tallest bin becomes 255; guard empty histograms
    cvGetMinMaxHistValue(pHist, 0, &histPeak, 0, 0);
    cvConvertScale(pHist->bins, pHist->bins, histPeak ? 255.0 / histPeak : 0, 0);

    cvResetImageROI(pHueImg);
    cvResetImageROI(pMask);

    // Seed the tracker with the detected face position
    prevFaceRect = *pFaceRect;
}
示例10: cvL1QCSolve
// Solve the l1-minimization problem with a quadratic constraint
// (min ||x||_1 s.t. ||Ax - b||_2 <= epsilon) via a log-barrier method.
// X receives the solution. mu is the barrier growth factor per outer
// iteration; lb_term_crit controls the outer log-barrier loop and
// cg_term_crit the inner conjugate-gradient solves.
// Returns 0 on success, -1 if the initial least-squares seed fails.
int cvL1QCSolve( CvMat* A, CvMat* B, CvMat* X, double epsilon, double mu, CvTermCriteria lb_term_crit, CvTermCriteria cg_term_crit )
{
// Seed X with the minimum-energy solution X = A' * (A A')^{-1} * B
CvMat* AAt = cvCreateMat( A->rows, A->rows, CV_MAT_TYPE(A->type) );
cvGEMM( A, A, 1, NULL, 0, AAt, CV_GEMM_B_T );
CvMat* W = cvCreateMat( A->rows, 1, CV_MAT_TYPE(X->type) );
if ( cvCGSolve( AAt, B, W, cg_term_crit ) > .5 )
{
// CG failed to converge — clean up and report failure
cvReleaseMat( &W );
cvReleaseMat( &AAt );
return -1;
}
cvGEMM( A, W, 1, NULL, 0, X, CV_GEMM_A_T );
cvReleaseMat( &W );
cvReleaseMat( &AAt );
// Initialize the slack variables U = 0.95*|X| + 0.10*max|X| (strictly above |X|)
CvMat* U = cvCreateMat( X->rows, X->cols, CV_MAT_TYPE(X->type) );
cvAbsDiffS( X, U, cvScalar(0) ); // U = |X|
CvScalar sumAbsX = cvSum( U );
double minAbsX, maxAbsX;
cvMinMaxLoc( U, &minAbsX, &maxAbsX );
cvConvertScale( U, U, .95, maxAbsX * .1 );
// Initial barrier weight tau and the number of outer iterations needed to
// shrink the duality gap below lb_term_crit.epsilon
double tau = MAX( (2 * X->rows + 1) / sumAbsX.val[0], 1 );
if ( !(lb_term_crit.type & CV_TERMCRIT_ITER) )
lb_term_crit.max_iter = ceil( (log(2 * X->rows + 1) - log(lb_term_crit.epsilon) - log(tau)) / log(mu) );
CvTermCriteria nt_term_crit = cvTermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 50, lb_term_crit.epsilon );
// Outer log-barrier loop: Newton-solve at each tau, then tighten the barrier
for ( int i = 0; i < lb_term_crit.max_iter; ++i )
{
icvL1QCNewton( A, B, X, U, epsilon, tau, nt_term_crit, cg_term_crit );
tau *= mu;
}
cvReleaseMat( &U );
return 0;
}
示例11: cvInRangeS
// One tracking step: on the first call build the hue histogram from the
// member `selection` rectangle, then back-project the histogram onto the
// current frame and advance `trackWindow` with camshift.
// hsv/hue/mask/backproject/hist/trackWindow/trackComp etc. are members.
void BoatDetecting::startTrackObject(){
// Mask out pixels with too little saturation or out-of-range value
cvInRangeS(hsv, cvScalar(0, smin, MIN(vmin, vmax), 0), cvScalar(180, 256, MAX(vmin, vmax), 0), mask);
// 10,256,30
cvSplit(hsv, hue, 0, 0, 0); // isolate the hue plane
if (!isTrackingInitialized){ // if the tracking window has not been initialized yet
// Build the normalized hue histogram over the initial selection
float max_val = 0.f;
cvSetImageROI(hue, selection);
cvSetImageROI(mask, selection);
cvCalcHist(&hue, hist, 0, mask);
cvGetMinMaxHistValue(hist, 0, &max_val, 0, 0);
cvConvertScale(hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0);
cvResetImageROI(hue);
cvResetImageROI(mask);
trackWindow = selection;
isTrackingInitialized = true;
}
// Back-project the model histogram onto the frame, restricted by the mask
cvCalcBackProject(&hue, backproject, hist);
//cvShowImage("Hue Channel",backproject);
cvAnd(backproject, mask, backproject, 0);
// Only run camshift while the window stays inside the frame bounds
//if (trackWindow.x + trackWindow.width/2< allfWidth &&trackWindow.y + trackWindow.height/2< allfHeight &&trackWindow.x>0)
if (trackWindow.x + trackWindow.width< allfWidth &&trackWindow.y + trackWindow.height< allfHeight &&trackWindow.x>0)
cvCamShift(backproject, trackWindow, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 1), &trackComp, 0);// after initialization trackWindow itself is used for tracking and is updated every frame
//if (trackComp.rect.width<90 && trackComp.rect.y<200){
// trackWindow = trackComp.rect;
//}
//if (trackComp.rect.y>200)
//{
// trackWindow = trackComp.rect;
//}
// NOTE(review): trackComp is used even when the bounds check above skipped
// cvCamShift, so a stale rect may be copied — confirm intended
trackWindow = trackComp.rect;
}
示例12: color
/*
Converts an image to 32-bit grayscale with values normalized to [0,1]
@param img a 3-channel 8-bit color (BGR) or 8-bit gray image
@return Returns a newly allocated 32-bit grayscale image (caller releases)
*/
static IplImage* convert_to_gray32( IplImage* img )
{
	IplImage* gray8, * gray32;

	gray32 = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 ); // 32-bit single-channel result
	// First obtain an 8-bit single-channel version of the original
	if( img->nChannels == 1 )
		// BUGFIX: cvClone returns void*, which requires an explicit cast in C++
		gray8 = (IplImage*)cvClone( img );
	else
	{
		gray8 = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 );
		cvCvtColor( img, gray8, CV_BGR2GRAY );
	}
	// Widen gray8 to 32-bit float, normalizing by 255
	cvConvertScale( gray8, gray32, 1.0 / 255.0, 0 );
	cvReleaseImage( &gray8 ); // release the temporary image
	return gray32;
}
示例13: renderChainsWithBoxes
// Render only the components that belong to at least one chain, compute
// their bounding boxes into `bb`, and write an 8-bit RGB visualization
// into `output` (via a 32-bit float intermediate).
void renderChainsWithBoxes(IplImage * SWTImage,
                           std::vector<std::vector<Point2d> > & components,
                           std::vector<Chain> & chains,
                           std::vector<std::pair<Point2d, Point2d> > & compBB,
                           std::vector<std::pair<CvPoint, CvPoint> > & bb,
                           IplImage * output) {
    // keep track of included components
    // IMPROVED: fill-construct instead of reserve + push_back(false) loop
    std::vector<bool> included(components.size(), false);

    for (std::vector<Chain>::iterator it = chains.begin(); it != chains.end();
         it++) {
        for (std::vector<int>::iterator cit = it->components.begin();
             cit != it->components.end(); cit++) {
            included[*cit] = true;
        }
    }

    // Keep only the components referenced by some chain
    std::vector<std::vector<Point2d> > componentsRed;
    for (unsigned int i = 0; i != components.size(); i++) {
        if (included[i]) {
            componentsRed.push_back(components[i]);
        }
    }

    IplImage * outTemp = cvCreateImage(cvGetSize(output), IPL_DEPTH_32F, 1);

    LOGL(LOG_CHAINS, componentsRed.size() << " components after chaining");

    // Render surviving components and derive their bounding boxes
    renderComponents(SWTImage, componentsRed, outTemp);
    bb = findBoundingBoxes(chains, compBB, outTemp);

    // Convert the float render to 8-bit and expand to 3 channels for display
    IplImage * out = cvCreateImage(cvGetSize(output), IPL_DEPTH_8U, 1);
    cvConvertScale(outTemp, out, 255, 0);
    cvCvtColor(out, output, CV_GRAY2RGB);
    cvReleaseImage(&out);
    cvReleaseImage(&outTemp);
}
示例14: cvSetReal2D
//------------------------------------------------------------------------------
// Color Similarity Matrix Calculation
//------------------------------------------------------------------------------
// Builds an nbins x nbins matrix m with
//   m(i,j) = 1 - exp(-(c_i - c_j)^2 / (2*sigma^2)),
// where c_k = (k + 0.5)/nbins are the bin centers — a Gaussian-derived
// dissimilarity between histogram bins. The caller releases the result.
CvMat *colorsim(int nbins, double sigma) {
    CvMat *xc = cvCreateMat(1, nbins, CV_32FC1);      // row vector of bin centers
    CvMat *yr = cvCreateMat(nbins, 1, CV_32FC1);      // column vector of bin centers
    CvMat *x  = cvCreateMat(nbins, nbins, CV_32FC1);  // meshgrid: centers vary along columns
    CvMat *y  = cvCreateMat(nbins, nbins, CV_32FC1);  // meshgrid: centers vary along rows
    CvMat *m  = cvCreateMat(x->rows, x->rows, CV_32FC1);

    // Bin centers: (j + 0.5)/nbins for j = 0..nbins-1
    for (int j = 0; j < nbins; j++) {
        cvSetReal2D(xc, 0, j, (j + 1 - 0.5) / nbins);
        cvSetReal2D(yr, j, 0, (j + 1 - 0.5) / nbins);
    }

    // Tile the vectors into full meshgrids.
    // BUGFIX: cvRepeat fills the entire destination in one call; the original
    // invoked it inside a loop nbins times, redoing identical work.
    cvRepeat(xc, x);
    cvRepeat(yr, y);

    // m = 1 - exp(-(x - y)^2 / (2*sigma^2))
    CvMat *sub = cvCreateMat(x->rows, y->cols, CV_32FC1);
    cvSub(x, y, sub);
    cvAbs(sub, sub);
    cvMul(sub, sub, sub);
    cvConvertScale(sub, sub, -1.0 / (2 * sigma * sigma));
    cvExp(sub, sub);
    cvSubRS(sub, cvScalar(1.0), m);

    cvReleaseMat(&xc);
    cvReleaseMat(&yr);
    cvReleaseMat(&x);
    cvReleaseMat(&y);
    cvReleaseMat(&sub);
    return m;
}
示例15: cvConvertScale
//============================================================================
// Compose the inverse of the incremental warp parameters dpq with the
// current shape estimate s, writing the composed shape into NewS
// (presumably the inverse-compositional AAM update of Matthews & Baker —
// confirm against the accompanying paper/figure references).
// __inv_pq, __update_s0, __shape, __paw and __sMean are class members.
void AAM_IC::InverseCompose(const CvMat* dpq, const CvMat* s, CvMat* NewS)
{
// Firstly: Estimate the corresponding changes to the base mesh
cvConvertScale(dpq, __inv_pq, -1); // negate the parameter update to invert the incremental warp
__shape.CalcShape(__inv_pq, __update_s0); // __update_s0 = N.W(s0, -delta_p, -delta_q)
//Secondly: Composing the Incremental Warp with the Current Warp Estimate.
double *S0 = __update_s0->data.db; // updated base-mesh vertices (x,y interleaved)
double *S = s->data.db;            // current shape vertices
double *SEst = NewS->data.db;      // composed output vertices
double x, y, xw, yw;
int k, tri_idx;
int v1, v2, v3;
const std::vector<std::vector<int> >& tri = __paw.__tri;   // triangle -> vertex indices
const std::vector<std::vector<int> >& vtri = __paw.__vtri; // vertex -> adjacent triangles
for(int i = 0; i < __shape.nPoints(); i++)
{
x = 0.0; y = 0.0;
k = 0;
//The only problem with this approach is which triangle do we use?
//In general there will be several triangles that share the i-th vertex.
// NOTE(review): k is int compared against the unsigned size() below —
// harmless for realistic mesh sizes but triggers sign-compare warnings
for(k = 0; k < vtri[i].size(); k++)// see Figure (11)
{
// Warp vertex i through each adjacent triangle's affine map and sum
tri_idx = vtri[i][k];
v1 = tri[tri_idx][0];
v2 = tri[tri_idx][1];
v3 = tri[tri_idx][2];
AAM_PAW::Warp(S0[2*i],S0[2*i+1],
__sMean[v1].x, __sMean[v1].y,__sMean[v2].x, __sMean[v2].y,__sMean[v3].x, __sMean[v3].y,
xw, yw, S[2*v1], S[2*v1+1], S[2*v2], S[2*v2+1], S[2*v3], S[2*v3+1]);
x += xw; y += yw;
}
// average the result so as to smooth the warp at each vertex
SEst[2*i] = x/k; SEst[2*i+1] = y/k;
}
}