This article collects typical usage examples of the C++ DImage class. If you are unsure what DImage is for, how to use it, or what real code that uses it looks like, the hand-picked examples below may help.
A total of 15 DImage code examples are shown, sorted by popularity by default.
Example 1: ParImageToIplImage
cv::Mat ParImageToIplImage(DImage& img)
{
    int width = img.width();
    int height = img.height();
    int nChannels = img.nchannels();
    if(width <= 0 || height <= 0 || nChannels != 1)
        return cv::Mat();
    BaseType*& pData = img.data();
    // 8-bit single-channel output; the *255 scaling assumes the input holds values in [0,1]
    cv::Mat image = cv::Mat(height, width, CV_8UC1);
    for(int i = 0; i < height; i++)
    {
        for(int j = 0; j < width; j++)
        {
            image.ptr<uchar>(i)[j] = pData[i*width + j] * 255;
        }
    }
    return image;
}
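A minimal usage sketch for the conversion above. The imread() loader on DImage, the "Image.h" header name, and the input file name are assumptions not shown in the example; the OpenCV calls are standard.
#include <opencv2/opencv.hpp>
#include "Image.h"   // assumed header that defines DImage / BaseType

int main()
{
    DImage img;
    if(!img.imread("input.png"))           // imread() is assumed to exist on DImage
        return -1;
    cv::Mat mat = ParImageToIplImage(img); // returns an empty Mat for multi-channel input
    if(mat.empty())
        return -1;
    cv::imshow("DImage as cv::Mat", mat);
    cv::waitKey(0);
    return 0;
}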
Example 2: weight1
void OpticalFlow::estGaussianMixture(const DImage& Im1, const DImage& Im2, GaussianMixture& para, double prior)
{
    int nIterations = 3, nChannels = Im1.nchannels();
    DImage weight1(Im1), weight2(Im1);
    double *total1, *total2;
    total1 = new double[nChannels];
    total2 = new double[nChannels];
    for(int count = 0; count < nIterations; count++)
    {
        double temp;
        memset(total1, 0, sizeof(double)*nChannels);
        memset(total2, 0, sizeof(double)*nChannels);
        // E step
        for(int i = 0; i < weight1.npixels(); i++)
            for(int k = 0; k < nChannels; k++)
            {
                int offset = i*weight1.nchannels()+k;
                temp = Im1[offset]-Im2[offset];
                temp *= temp;
                weight1[offset] = para.Gaussian(temp,0,k)*para.alpha[k];
                weight2[offset] = para.Gaussian(temp,1,k)*(1-para.alpha[k]);
                temp = weight1[offset]+weight2[offset];
                weight1[offset] /= temp;
                weight2[offset] /= temp;
                total1[k] += weight1[offset];
                total2[k] += weight2[offset];
            }
        // M step
        para.reset();
        for(int i = 0; i < weight1.npixels(); i++)
            for(int k = 0; k < nChannels; k++)
            {
                int offset = i*weight1.nchannels()+k;
                temp = Im1[offset]-Im2[offset];
                temp *= temp;
                para.sigma[k] += weight1[offset]*temp;
                para.beta[k]  += weight2[offset]*temp;
            }
        for(int k = 0; k < nChannels; k++)
        {
            para.alpha[k] = total1[k]/(total1[k]+total2[k])*(1-prior)+0.95*prior; // regularize alpha
            para.sigma[k] = sqrt(para.sigma[k]/total1[k]);
            para.beta[k]  = sqrt(para.beta[k]/total2[k])*(1-prior)+0.3*prior;     // regularize beta
        }
        para.square();
    }
    // free the per-channel accumulators
    delete []total1;
    delete []total2;
}
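A hypothetical call site for the estimator above. Only alpha, sigma, beta, Gaussian(), reset() and square() of GaussianMixture are visible in the example, so the constructor, the imread() loader, and the prior value are assumptions.
DImage Im1, Im2;
Im1.imread("frame1.png");                    // imread() on DImage is assumed
Im2.imread("frame2.png");
GaussianMixture para(Im1.nchannels());       // per-channel two-component model; ctor is assumed
// called as a static member here; adjust if your version needs an OpticalFlow instance
OpticalFlow::estGaussianMixture(Im1, Im2, para, 0.3);   // prior weight 0.3 is illustrative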
Example 3: apply_clustering
DPlane DColorCluster::apply_clustering(const DImage &input)
{
    DPlane result(input.rows(), input.cols());
    for(int i=0; i<input.rows(); i++)
        for(int j=0; j<input.cols(); j++)
        {
            int closest_cluster=0;
            double min_dist=1000000000;
            DTriple sample(input[0][i][j], input[1][i][j], input[2][i][j]);
            for(int c=0; c<clusters.size(); c++)
                if(clusters[c].distance_to(sample) < min_dist)
                {
                    min_dist = clusters[c].distance_to(sample);
                    closest_cluster = c;
                }
            result[i][j] = closest_cluster+1;
        }
    return result;
}
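A hypothetical end-to-end sketch: do_clustering (shown in Example 15 below) fits the k^3 color clusters, after which apply_clustering labels another image with the same clusters. The image-loading step is left abstract because no DImage I/O appears in these two examples.
DImage trainImg, testImg;
// ... load trainImg and testImg with whatever I/O the DImage class provides ...
DColorCluster cc;
DPlane trainLabels = cc.do_clustering(trainImg, /*k=*/3, /*ignore_black=*/true);
DPlane testLabels  = cc.apply_clustering(testImg);  // reuses the clusters fitted above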
Example 4: ConstructPyramidLevels
void GaussianPyramid::ConstructPyramidLevels(const DImage &image, double ratio, int _nLevels)
{
    // the ratio cannot be an arbitrary number
    if(ratio>0.98 || ratio<0.4)
        ratio=0.75;
    nLevels = _nLevels;
    if(ImPyramid!=NULL)
        delete []ImPyramid;
    ImPyramid=new DImage[nLevels];
    ImPyramid[0].copyData(image);
    double baseSigma=(1/ratio-1);
    int n=log(0.25)/log(ratio);
    double nSigma=baseSigma*n;
    for(int i=1;i<nLevels;i++)
    {
        DImage foo;
        if(i<=n)
        {
            double sigma=baseSigma*i;
            image.GaussianSmoothing(foo,sigma,sigma*3);
            foo.imresize(ImPyramid[i],pow(ratio,i));
        }
        else
        {
            ImPyramid[i-n].GaussianSmoothing(foo,nSigma,nSigma*3);
            double rate=(double)pow(ratio,i)*image.width()/foo.width();
            foo.imresize(ImPyramid[i],rate);
        }
    }
}
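A minimal construction sketch. The imread() loader and the default GaussianPyramid constructor are assumptions; reading the individual levels back depends on accessors not shown here.
DImage frame;
frame.imread("frame.png");                        // imread() assumed
GaussianPyramid pyramid;                          // default ctor assumed
pyramid.ConstructPyramidLevels(frame, 0.75, 5);   // ratio 0.75, 5 levels (level 0 = original image)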
Example 5: WritePPMImage
void WritePPMImage(const DImage &img, const char *filename)
{
    FILE *fp = fopen(filename, "wb");
    if(fp == NULL)
        return;
    // write magic number
    fprintf(fp, "P6\n");
    // write dimensions
    fprintf(fp, "%d %d\n", img.cols(), img.rows());
    // write max pixel value
    fprintf(fp, "255\n");
    for(int i=0; i<img.rows(); i++)
        for(int j=0; j<img.cols(); j++)
            for(int k=0; k<3; k++)
                fputc(int(img[k][i][j]), fp);
    fclose(fp);
}
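A short usage sketch, assuming img holds three planes indexed as img[k][i][j] with values already in the 0..255 range, as the writer above expects; the file name is a placeholder.
DImage rgb;
// ... fill rgb so that rgb[0], rgb[1], rgb[2] are the R, G, B planes in 0..255 ...
WritePPMImage(rgb, "output.ppm");   // writes a binary (P6) PPM file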
Example 6: sizeof
/**Each row of img is projected onto the vertical axis. The resulting
data length will be equal to the height of img. The profile is a summation
of the grayscale values in each row. If fNormalize is true, each value
is divided by img.width(), so it is the average grayscale value for the row
instead of the sum.
*/
void DProfile::getImageVerticalProfile(const DImage &img, bool fNormalize){
int w, h;
w = img.width();
h = img.height();
// allocate the rgProf array
if(NULL == rgProf){
rgProf = (double*)malloc(h * sizeof(double));
D_CHECKPTR(rgProf);
len = h;
}
else{
if(len != h){
rgProf = (double*)realloc(rgProf,h*sizeof(double));
D_CHECKPTR(rgProf);
len = h;
}
}
switch(img.getImageType()){
case DImage::DImage_u8:
{
D_uint8 *pu8;
pu8=img.dataPointer_u8();
for(int y = 0, idx=0; y < h; ++y){
rgProf[y] = 0.;
for(int x = 0; x < w; ++x, ++idx){
rgProf[y] += pu8[idx];
}
if(fNormalize)
rgProf[y] /= w;
}
}
break;
case DImage::DImage_flt_multi:
{
float *pflt;
if(img.numChannels() > 1){
fprintf(stderr,"DProfile::getImageVerticalProfile() floats only "
"supported with a single channel\n");
abort();
}
pflt=img.dataPointer_flt(0);
for(int y = 0, idx=0; y < h; ++y){
rgProf[y] = 0.;
for(int x = 0; x < w; ++x, ++idx){
rgProf[y] += pflt[idx];
}
if(fNormalize)
rgProf[y] /= w;
}
}
break;
default:
fprintf(stderr, "Not yet implemented!\n");
abort();
}//end switch(img.getImageType())
}
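A hypothetical call to the profile routine above. The default DProfile constructor and any accessor for the computed values are assumptions; only the internal members rgProf and len appear in the example.
DImage page;
// ... load page as an 8-bit grayscale image (DImage::DImage_u8) ...
DProfile prof;                                    // default ctor assumed
prof.getImageVerticalProfile(page, /*fNormalize=*/true);
// each profile entry is now the mean gray value of the corresponding image row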
Example 7: showFlow
bool OpticalFlow::showFlow(const DImage& flow,const char* filename)
{
    if(flow.nchannels()!=1)
    {
        cout<<"The flow must be a single channel image!"<<endl;
        return false;
    }
    Image<unsigned char> foo;
    foo.allocate(flow.width(),flow.height());
    double Max = flow.max();
    double Min = flow.min();
    for(int i = 0;i<flow.npixels(); i++)
        foo[i] = (flow[i]-Min)/(Max-Min)*255;
    foo.imwrite(filename);
    return true;
}
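A minimal sketch writing one flow component to disk. u is assumed to be a single-channel flow field produced elsewhere by the library, and the output file name is a placeholder.
DImage u;                                 // e.g. the horizontal flow component, single channel
// ... run the flow estimation so that u is filled ...
// called as a static member here; adjust if your version needs an OpticalFlow instance
OpticalFlow::showFlow(u, "flow_u.png");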
Example 8: warpFL
void OpticalFlow::warpFL(DImage &warpIm2, const DImage &Im1, const DImage &Im2, const DImage &Flow)
{
    if(warpIm2.matchDimension(Im2)==false)
        warpIm2.allocate(Im2.width(),Im2.height(),Im2.nchannels());
    ImageProcessing::warpImageFlow(warpIm2.data(),Im1.data(),Im2.data(),Flow.data(),Im2.width(),Im2.height(),Im2.nchannels());
}
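A hypothetical warp call. The Flow argument is passed as one DImage because that is what warpFL takes above; its internal layout (u and v components interleaved) is an assumption, as is the image loading.
DImage Im1, Im2, Flow, warped;
// ... load Im1/Im2 and estimate Flow ...
// called as a static member here; adjust if your version needs an OpticalFlow instance
OpticalFlow::warpFL(warped, Im1, Im2, Flow);   // warped is allocated to match Im2's dimensions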
Example 9: generateCoarserLevel
//------------------------------------------------------------------------------------------------
// multi-grid belief propagation
//------------------------------------------------------------------------------------------------
void BPFlow::generateCoarserLevel(BPFlow &bp)
{
//------------------------------------------------------------------------------------------------
// set the dimensions and parameters
//------------------------------------------------------------------------------------------------
bp.Width=Width/2;
if(Width%2==1)
bp.Width++;
bp.Height=Height/2;
if(Height%2==1)
bp.Height++;
bp.Area=bp.Width*bp.Height;
bp.s=s;
bp.d=d;
DImage foo;
Im_s.smoothing(foo);
foo.imresize(bp.Im_s,bp.Width,bp.Height);
Im_d.smoothing(foo);
foo.imresize(bp.Im_d,bp.Width,bp.Height);
bp.IsDisplay=IsDisplay;
bp.nNeighbors=nNeighbors;
//------------------------------------------------------------------------------------------------
// allocate buffers
//------------------------------------------------------------------------------------------------
for(int i=0;i<2;i++)
{
bp.pOffset[i]=new int[bp.Area];
bp.pWinSize[i]=new int[bp.Area];
ReduceImage(bp.pOffset[i],Width,Height,pOffset[i]);
ReduceImage(bp.pWinSize[i],Width,Height,pWinSize[i]);
for(int j = 0;j<bp.Area;j++)
bp.pWinSize[i][j] = __max(bp.pWinSize[i][j],1);
}
//------------------------------------------------------------------------------------------------
// generate data term
//------------------------------------------------------------------------------------------------
bp.nTotalMatches=bp.AllocateBuffer(bp.pDataTerm,bp.ptrDataTerm,bp.pWinSize[0],bp.pWinSize[1]);
for(int i=0;i<bp.Height;i++)
for(int j=0;j<bp.Width;j++)
{
int offset=i*bp.Width+j;
for(int ii=0;ii<2;ii++)
for(int jj=0;jj<2;jj++)
{
int y=i*2+ii;
int x=j*2+jj;
if(y<Height && x<Width)
{
int nStates=(bp.pWinSize[0][offset]*2+1)*(bp.pWinSize[1][offset]*2+1);
for(int k=0;k<nStates;k++)
bp.pDataTerm[offset].data()[k]+=pDataTerm[y*Width+x].data()[k];
}
}
}
//------------------------------------------------------------------------------------------------
// generate range term
//------------------------------------------------------------------------------------------------
bp.ComputeRangeTerm(gamma/2);
}
Example 10: tan
/**The slant angle is assumed to be between 60 and -45 degrees (0 deg=vertical,
* negative values are left-slanted, positive values right-slanted).
* To determine slant: at each x-position, the longest runlength at each angle
* is found and its squared value is added into the accumulator for that angle.
* The histogram is smoothed, and the angle corresponding to the highest value
* in the histogram is the returned angle (in degrees).
*
* Runlengths of less than rlThresh pixels are ignored.
*
* The image should be black(0) and white(255). The portion of the image
* specified by x0,y0 - x1,y1 is considered to be the textline of interest.
* If no coordinates are specified, then the entire image is used as the
* textline.
*
* If weight is not NULL, it will be the sum of max runlengths (not squared) at
* all 120 angles. Weights are used in determination of weighted average angle
* for all textlines in getAllTextlinesSlantAngleDeg() before adjusting angles.
*
* If rgSlantHist is not NULL, the squared max RL values in the angle histogram
* will be copied into the rgSlantHist array. It must already be allocated to
* 120*sizeof(unsigned int).
*
* if imgAngleHist is not NULL, then the image is set to w=120 and h=y1-y0+1.
* It is a (grayscale) graphical representation of what is in rgSlantHist.
*/
double DSlantAngle::getTextlineSlantAngleDeg(DImage &imgBW,
int rlThresh,
int x0,int y0,int x1,int y1,
double *weight,
unsigned int *rgSlantHist,
DImage *imgAngleHist){
int *rgLineSlantAngles;
int lineH;
int slantOffset, slantAngle, angle;
unsigned int rgSlantSums[120];
unsigned int rgSlantSumsTmp[120];
int runlen, maxrl; /* maximum slant runlen */
double slantDx;
int w, h;
D_uint8 *p8;
double dblWeight = 0;
w = imgBW.width();
h = imgBW.height();
p8 = imgBW.dataPointer_u8();
if(-1 == x1)
x1 = w-1;
if(-1 == y1)
y1 = h-1;
lineH = y1-y0+1;
/* estimate the predominant slant angle (0=vertical, +right, -left) */
slantOffset = (int)(0.5+ (lineH / 2.0) / tan(DMath::degreesToRadians(30.)));
for(int j = 0; j < 120; ++j){
rgSlantSums[j] = 0;
rgSlantSumsTmp[j] = 0;
}
for(angle = -45; angle <= 60; angle += 1){
/* at each x-position, sum the maximum run length at that angle into the
accumulator */
if(0 == angle) /* vertical, so tangent is infinity */
slantDx = 0.;
else
slantDx = -1.0 / tan(DMath::degreesToRadians(90-angle));
// for(j = slantOffset; j < (hdr.w-slantOffset); ++j){
for(int j = x0; j <= x1; ++j){
maxrl = 0;
runlen = 0;
for(int y = 0; y < lineH; ++y){
int x;
x = (int)(0.5+ j + y * slantDx);
if( (x>=x0) && (x <= x1)){ /* make sure we are within bounds */
int idxtmp;
idxtmp = (y+y0)*w+x;
// imgCoded[idxtmp*3] = 0;
if(0 == p8[idxtmp]){
++runlen;
if(runlen > maxrl){
maxrl = runlen;
}
}
else
runlen = 0;
} /* end if in bounds */
else{
runlen = 0; /* ignore runs that go off edge of image */
}
}
if(maxrl > rlThresh){
rgSlantSums[angle+45] += maxrl*maxrl;
dblWeight += maxrl;
}
} /* end for j */
} /* end for angle */
//smooth the histogram
rgSlantSumsTmp[0] = (rgSlantSums[0] + rgSlantSums[1]) / 2;
for(int aa = 1; aa < 119; ++aa){
rgSlantSumsTmp[aa]=(rgSlantSums[aa-1]+rgSlantSums[aa]+rgSlantSums[aa+1])/3;
//......... (rest of the code omitted) .........
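A hypothetical call to the slant estimator documented above, passing -1 for x1/y1 to use the whole image and NULL for the optional outputs, as the comment block permits. The run-length threshold and default DSlantAngle construction are assumptions.
DImage line;                              // binarized text line: ink = 0, background = 255
// ... load the binarized image ...
DSlantAngle sa;                           // default ctor assumed
double deg = sa.getTextlineSlantAngleDeg(line, /*rlThresh=*/4,
                                         0, 0, -1, -1,      // -1 means "use the full extent"
                                         NULL, NULL, NULL); // skip weight/histogram outputs
printf("estimated slant: %.1f degrees\n", deg);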
Example 11: getSlant_thread_func
void* DSlantAngle::getSlant_thread_func(void *params){
SLANT_THREAD_PARMS *pparms;
int numThreads;
int w, h;
D_uint8 *p8;
int runlen, maxrl; /* maximum slant runlen */
double slantDx;
int lineH;
int slantOffset, slantAngle, angle;
double dblWeight;
DImage *pimg;
int rlThresh;
pparms = (SLANT_THREAD_PARMS*)params;
numThreads = pparms->numThreads;
pimg = pparms->pImgSrc;
rlThresh = pparms->rlThresh;
w = pimg->width();
h = pimg->height();
p8 = pimg->dataPointer_u8();
for(int i=0; i < 120; ++i)
pparms->rgSlantSums[i] = 0;
for(int tl=pparms->threadNum; tl < (pparms->numTextlines); tl+=numThreads){
int x0, y0, x1, y1;
unsigned int rgSlantSums[120];
x0 = pparms->rgRects[tl].x;
y0 = pparms->rgRects[tl].y;
x1 = pparms->rgRects[tl].x + pparms->rgRects[tl].w - 1;
y1 = pparms->rgRects[tl].y + pparms->rgRects[tl].h - 1;
lineH = y1-y0+1;
memset(rgSlantSums, 0, sizeof(int)*120);
dblWeight = 0.;
for(angle = -45; angle <= 60; angle += 1){
/* at each x-position, sum the maximum run length at that angle into the
accumulator */
if(0 == angle) /* vertical, so tangent is infinity */
slantDx = 0.;
else
slantDx = -1.0 / tan(DMath::degreesToRadians(90-angle));
// for(j = slantOffset; j < (hdr.w-slantOffset); ++j){
for(int j = x0; j <= x1; ++j){
maxrl = 0;
runlen = 0;
for(int y = 0; y < lineH; ++y){
int x;
x = (int)(0.5+ j + y * slantDx);
if( (x>=x0) && (x <= x1)){ /* make sure we are within bounds */
int idxtmp;
idxtmp = (y+y0)*w+x;
// imgCoded[idxtmp*3] = 0;
if(0 == p8[idxtmp]){
++runlen;
if(runlen > maxrl){
maxrl = runlen;
}
}
else
runlen = 0;
} /* end if in bounds */
else{
runlen = 0; /* ignore runs that go off edge of image */
}
}
if(maxrl > rlThresh){
rgSlantSums[angle+45] += maxrl*maxrl;
dblWeight += maxrl;
}
} /* end for j */
} /* end for angle */
for(int i=0; i < 120; ++i)
pparms->rgSlantSums[i] += rgSlantSums[i];
if(NULL != (pparms->rgWeights)){
pparms->rgWeights[tl] = dblWeight;
}
if(NULL != (pparms->rgAngles)){
// need to independently figure out the angle for this particular textline
unsigned int rgSlantSumsTmp[120];
//smooth the histogram
rgSlantSumsTmp[0] = (rgSlantSums[0] + rgSlantSums[1]) / 2;
for(int aa = 1; aa < 119; ++aa){
rgSlantSumsTmp[aa]=(rgSlantSums[aa-1]+rgSlantSums[aa]+rgSlantSums[aa+1])/3;
}
// for(int aa = 0; aa < 120; ++aa){
// rgSlantSums[aa] = rgSlantSumsTmp[aa];
// }
//use the smoothed histogram peak as the slant angle
slantAngle = 0;
for(angle = -45; angle <= 60; angle += 1){
if(rgSlantSumsTmp[angle+45] > rgSlantSumsTmp[slantAngle+45]){
slantAngle = angle;
}
} /* end for angle */
pparms->rgAngles[tl] = slantAngle;
}
}
//......... (rest of the code omitted) .........
Example 12: main_Regular
void main_Regular(int idx)
{
if(idx > 18 || idx < 0)
return;
int boarder_size = 16;
int center_width = 256;
int center_height = 256;
int width = center_width+boarder_size*2;
int height = center_height+boarder_size*2;
bool cut_boarder = true;
int data_index = idx;
const static int BUF_LEN = 200;
char out_flow_fold[BUF_LEN] = {0};
char out_par_fold[BUF_LEN] = {0};
int par_num = 0;
int vort_num = 0;
double max_vort = 1;
double min_vort = 0.6;
double max_vort_radius = 20;
double min_vort_radius = 15;
bool use_peroid_coord = false;
double base_vel_u = 0;
double base_vel_v = 0;
int skip_frames = 0;
int coarse_len = 16;
ZQ_PIVMovingObject* mvobj = 0;
DImage par_mask(width,height);
switch(data_index)
{
case 0:
strcpy_s(out_flow_fold, BUF_LEN,"flow0");
strcpy_s(out_par_fold, BUF_LEN,"par0");
srand(1000);
par_num = 10000;
vort_num = 40;
max_vort = 1.8;
min_vort = 1.2;
max_vort_radius = 20;
min_vort_radius = 20;
use_peroid_coord = true;
boarder_size = 64;
width = center_width + 2*boarder_size;
height = center_height + 2*boarder_size;
coarse_len = 32;
par_mask.allocate(width,height);
for(int y = 0;y < height;y++)
{
for(int x = 0;x < width;x++)
{
if(x < coarse_len || x >= width - coarse_len)
par_mask.data()[y*width+x] = 1;
}
}
base_vel_u = 5;
base_vel_v = 0;
skip_frames = 20;
break;
case 1:
strcpy_s(out_flow_fold, BUF_LEN,"flow1");
strcpy_s(out_par_fold, BUF_LEN,"par1");
srand(2000);
par_num = 10000;
vort_num = 40;
max_vort = 1.6;
min_vort = 0.8;
max_vort_radius = 20;
min_vort_radius = 20;
use_peroid_coord = true;
coarse_len = 48;
par_mask.allocate(width,height);
for(int y = 0;y < height;y++)
{
for(int x = 0;x < width;x++)
{
if(x >= width - coarse_len)
par_mask.data()[y*width+x] = 1;
}
}
base_vel_u = 5;
base_vel_v = 0;
skip_frames = 20;
break;
case 2:
strcpy_s(out_flow_fold, BUF_LEN,"flow2");
strcpy_s(out_par_fold, BUF_LEN,"par2");
srand(2000);
par_num = 10000;
vort_num = 40;
//......... (rest of the code omitted) .........
Example 13: if
//---------------------------------------------------------------------------------------
// function to convert image to feature image
//---------------------------------------------------------------------------------------
void OpticalFlow::im2feature(DImage &imfeature, const DImage &im)
{
int width=im.width();
int height=im.height();
int nchannels=im.nchannels();
if(nchannels==1)
{
imfeature.allocate(im.width(),im.height(),3);
DImage imdx,imdy;
im.dx(imdx,true);
im.dy(imdy,true);
_FlowPrecision* data=imfeature.data();
for(int i=0;i<height;i++)
for(int j=0;j<width;j++)
{
int offset=i*width+j;
data[offset*3]=im.data()[offset];
data[offset*3+1]=imdx.data()[offset];
data[offset*3+2]=imdy.data()[offset];
}
}
else if(nchannels==3)
{
DImage grayImage;
im.desaturate(grayImage);
imfeature.allocate(im.width(),im.height(),5);
DImage imdx,imdy;
grayImage.dx(imdx,true);
grayImage.dy(imdy,true);
_FlowPrecision* data=imfeature.data();
for(int i=0;i<height;i++)
for(int j=0;j<width;j++)
{
int offset=i*width+j;
data[offset*5]=grayImage.data()[offset];
data[offset*5+1]=imdx.data()[offset];
data[offset*5+2]=imdy.data()[offset];
data[offset*5+3]=im.data()[offset*3+1]-im.data()[offset*3];
data[offset*5+4]=im.data()[offset*3+1]-im.data()[offset*3+2];
}
}
else
imfeature.copyData(im);
}
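A minimal sketch of building the feature image for an RGB frame. The imread() loader is an assumption; for a 3-channel input the result has 5 channels (gray, dx, dy, and two color-difference channels), as constructed above.
DImage frame, feat;
frame.imread("frame.png");                // imread() assumed
// called as a static member here; adjust if your version needs an OpticalFlow instance
OpticalFlow::im2feature(feat, frame);     // feat: width x height x 5 for a 3-channel input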
Example 14: du
//--------------------------------------------------------------------------------------------------------
// function to compute optical flow field using two fixed point iterations
// Input arguments:
// Im1, Im2: frame 1 and frame 2
// warpIm2: the warped frame 2 according to the current flow field u and v
// u,v: the current flow field, NOTICE that they are also output arguments
//
//--------------------------------------------------------------------------------------------------------
void OpticalFlow::SmoothFlowSOR(const DImage &Im1, const DImage &Im2, DImage &warpIm2, DImage &u, DImage &v,
double alpha, int nOuterFPIterations, int nInnerFPIterations, int nSORIterations)
{
DImage mask,imdx,imdy,imdt;
int imWidth,imHeight,nChannels,nPixels;
imWidth=Im1.width();
imHeight=Im1.height();
nChannels=Im1.nchannels();
nPixels=imWidth*imHeight;
DImage du(imWidth,imHeight),dv(imWidth,imHeight);
DImage uu(imWidth,imHeight),vv(imWidth,imHeight);
DImage ux(imWidth,imHeight),uy(imWidth,imHeight);
DImage vx(imWidth,imHeight),vy(imWidth,imHeight);
DImage Phi_1st(imWidth,imHeight);
DImage Psi_1st(imWidth,imHeight,nChannels);
DImage imdxy,imdx2,imdy2,imdtdx,imdtdy;
DImage ImDxy,ImDx2,ImDy2,ImDtDx,ImDtDy;
DImage foo1,foo2;
double prob1,prob2,prob11,prob22;
double varepsilon_phi=pow(0.001,2);
double varepsilon_psi=pow(0.001,2);
//--------------------------------------------------------------------------
// the outer fixed point iteration
//--------------------------------------------------------------------------
for(int count=0;count<nOuterFPIterations;count++)
{
// compute the gradient
getDxs(imdx,imdy,imdt,Im1,warpIm2);
		// generate the mask to set the weight of the pixels moving outside of the image boundary to zero
genInImageMask(mask,u,v);
// set the derivative of the flow field to be zero
du.reset();
dv.reset();
//--------------------------------------------------------------------------
// the inner fixed point iteration
//--------------------------------------------------------------------------
for(int hh=0;hh<nInnerFPIterations;hh++)
{
// compute the derivatives of the current flow field
if(hh==0)
{
uu.copyData(u);
vv.copyData(v);
}
else
{
uu.Add(u,du);
vv.Add(v,dv);
}
uu.dx(ux);
uu.dy(uy);
vv.dx(vx);
vv.dy(vy);
// compute the weight of phi
Phi_1st.reset();
_FlowPrecision* phiData=Phi_1st.data();
double temp;
const _FlowPrecision *uxData,*uyData,*vxData,*vyData;
uxData=ux.data();
uyData=uy.data();
vxData=vx.data();
vyData=vy.data();
double power_alpha = 0.5;
for(int i=0;i<nPixels;i++)
{
temp=uxData[i]*uxData[i]+uyData[i]*uyData[i]+vxData[i]*vxData[i]+vyData[i]*vyData[i];
//phiData[i]=power_alpha*pow(temp+varepsilon_phi,power_alpha-1);
phiData[i] = 0.5/sqrt(temp+varepsilon_phi);
//phiData[i] = 1/(power_alpha+temp);
}
// compute the nonlinear term of psi
Psi_1st.reset();
_FlowPrecision* psiData=Psi_1st.data();
const _FlowPrecision *imdxData,*imdyData,*imdtData;
const _FlowPrecision *duData,*dvData;
imdxData=imdx.data();
imdyData=imdy.data();
imdtData=imdt.data();
duData=du.data();
dvData=dv.data();
double _a = 10000, _b = 0.1;
//......... (rest of the code omitted) .........
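A hypothetical single call to the solver above. In practice this routine is normally driven per pyramid level by a coarse-to-fine wrapper; the regularization weight and iteration counts below are illustrative values, not ones taken from the library.
DImage Im1, Im2, warpIm2, u, v;
// ... load Im1/Im2, allocate u and v as zero-initialized single-channel fields,
//     and initialize warpIm2 with a copy of Im2 ...
// called as a static member here; adjust if your version needs an OpticalFlow instance
OpticalFlow::SmoothFlowSOR(Im1, Im2, warpIm2, u, v,
                           /*alpha=*/0.012,
                           /*nOuterFPIterations=*/7,
                           /*nInnerFPIterations=*/1,
                           /*nSORIterations=*/30);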
Example 15: do_clustering
// k clusters per channel = k^3 clusters
DPlane DColorCluster::do_clustering(DImage &input, int k, bool ignore_black)
{
int cluster_count = k*k*k;
for(int i=0; i<cluster_count; i++)
clusters.push_back(DRGBCluster());
DPlane result(input.rows(), input.cols());
int delta = 256/(k*2);
for(int a=0, cl=0; a<k; a++)
for(int b=0; b<k; b++)
for(int c=0; c<k; c++, cl++)
clusters[cl].set_mean(
DTriple((a+1)*delta, (b+1)*delta, (c+1)*delta));
int changes=1000000000;
bool done=false;
while(!done) {
changes=0;
for(int i=0; i<input.rows(); i++)
for(int j=0; j<input.cols(); j++)
{
if(ignore_black && input[0][i][j] == 0 && input[1][i][j] == 0 &&
input[2][i][j] == 0)
{
result[i][j] = 0;
continue;
}
int closest_cluster=0;
double min_dist=1000000000;
DTriple sample(input[0][i][j], input[1][i][j], input[2][i][j]);
for(int c=0; c<cluster_count; c++)
if(clusters[c].distance_to(sample) < min_dist)
{
min_dist = clusters[c].distance_to(sample);
closest_cluster = c;
}
clusters[closest_cluster].add_sample(sample);
if(closest_cluster+1 != result[i][j])
{
result[i][j] = closest_cluster+1;
changes++;
}
}
if( changes < input.rows() * input.cols() * 0.001)
done=true;
else
for(int i=0; i<cluster_count; i++)
{
clusters[i].update_mean();
clusters[i].remove_all();
}
printf("there were %d changes\n",changes);
}
return result;
}