本文整理汇总了C++中IpVec::size方法的典型用法代码示例。如果您正苦于以下问题:C++ IpVec::size方法的具体用法?C++ IpVec::size怎么用?C++ IpVec::size使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类IpVec的用法示例。
在下文中一共展示了IpVec::size方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: EstimateQueue
//生成有序的相对距离的deque
void CGVM::EstimateQueue(const vector<vector<float> >& table, int idx, IpVec& FeaVec, vector<DistanceRelation>& OQ)
{
Ipoint Fea=FeaVec[idx];
OQ.reserve(FeaVec.size());
for(int i=0;i<FeaVec.size();i++)
{
if(i!=idx)
{
DistanceRelation tmpDR;
tmpDR.dis=table[Fea.clusterIndex][FeaVec[i].clusterIndex];
tmpDR.idx=i;
if(!OQ.size()) OQ.push_back(tmpDR);
else
{
if(tmpDR.dis<=OQ.front().dis)
OQ.insert(OQ.begin(),tmpDR);
else if(tmpDR.dis>=OQ.back().dis)
OQ.push_back(tmpDR);
else
{
for(vector<DistanceRelation>::iterator it=OQ.begin()+1;it!=OQ.end();++it)
{
if(tmpDR.dis>=(it-1)->dis&&tmpDR.dis<=it->dis)
{
OQ.insert(it,tmpDR);
break;
}
}
}
}
}
}
}
示例2: getMatches
//! Populate IpPairVec with matched ipts.
//! For each point in ipts1, find its two nearest neighbours in ipts2 by
//! descriptor distance and accept the best one only if it passes the
//! distance-ratio test (best < 0.65 * second best).
void getMatches(IpVec &ipts1, IpVec &ipts2, IpPairVec &matches) {
  float dist, d1, d2;
  Ipoint *match;
  matches.clear();

  for (unsigned int i = 0; i < ipts1.size(); i++) {
    d1 = d2 = 1000;
    match = 0; // no candidate yet (left uninitialized in the original)

    for (unsigned int j = 0; j < ipts2.size(); j++) {
      dist = ipts1[i] - ipts2[j];
      if (dist < d1) // if this feature matches better than current best
      {
        d2 = d1;
        d1 = dist;
        match = &ipts2[j];
      } else if (dist < d2) // this feature matches better than second best
      {
        d2 = dist;
      }
    }

    // If match has a d1:d2 ratio < 0.65 ipoints are a match.
    // Written as d1 < 0.65*d2 so duplicate descriptors (d1 == d2 == 0)
    // don't produce a 0/0 NaN, and so a never-set match is never dereferenced.
    if (match != 0 && d1 < 0.65f * d2) {
      // Store the change in position
      ipts1[i].dx = match->x - ipts1[i].x;
      ipts1[i].dy = match->y - ipts1[i].y;
      match->dx = ipts1[i].x - match->x;
      match->dy = ipts1[i].y - match->y;
      matches.push_back(std::make_pair(ipts1[i], *match));
    }
  }
}
示例3: DoExtractKeypointsDescriptor
// Run SURF detection + description on a grayscale image and convert every
// detected Ipoint into a windage::OpenSURFpoint stored in this->keypoints.
// Returns false for a null or non-single-channel image, true on success.
bool OpenSURFdetector::DoExtractKeypointsDescriptor(IplImage* grayImage)
{
    // Reject missing or non-grayscale input up front.
    if (grayImage == NULL || grayImage->nChannels != 1)
        return false;

    this->keypoints.clear();

    IpVec ipts;
    surfDetDes(grayImage, ipts, false, 5, 4, 2, this->threshold);

    for (unsigned int i = 0; i < ipts.size(); i++)
    {
        Ipoint* ipt = &ipts.at(i);

        windage::OpenSURFpoint point;
        point.SetPoint(windage::Vector3(ipt->x, ipt->y, 1.0));
        point.SetSize(2.5f * ipt->scale);       // OpenSURF scale -> keypoint size
        point.SetDir(-ipt->orientation);        // sign flipped for windage convention

        for (int j = 0; j < point.DESCRIPTOR_DIMENSION; j++)
            point.descriptor[j] = ipt->descriptor[j];

        this->keypoints.push_back(point);
    }
    return true;
}
示例4: copySURFPts
// Copy SURF descriptors from an IpVec into an ImageFeatures object,
// (re)allocating the destination to fit.
// @param dst     destination feature store; freed first if already allocated
// @param src     detected interest points whose descriptors are copied
// @param length  descriptor length (64 for standard SURF)
void copySURFPts(ImageFeatures &dst, const IpVec src, const int length)
{
    const int size = (int)src.size();

    // If the object has already been allocated, deallocate it first.
    if (dst.checkAlloc())
        dst.dealloc();

    // Allocate with the correct dimensions.
    dst.alloc(length, size);

    for (int i = 0; i < size; i++)
    {
        // Copy into a mutable temporary: copyDescriptorAt may take a
        // non-const float*, so we can't hand it src.at(i).descriptor directly.
        Ipoint temp = src.at(i);
        // NOTE(review): descriptor L2-normalisation was commented out in the
        // original at this spot; intentionally not re-enabled.
        dst.copyDescriptorAt(temp.descriptor, i);
    }
}
示例5: mainKmeans
int mainKmeans(void)
{
IplImage *img = cvLoadImage("../imgs/img1.jpg");
IpVec ipts;
Kmeans km;
// Get Ipoints
surfDetDes(img,ipts,true,3,4,2,0.0006f);
for (int repeat = 0; repeat < 10; ++repeat)
{
IplImage *img = cvLoadImage("../imgs/img1.jpg");
km.Run(&ipts, 5, true);
drawPoints(img, km.clusters);
for (unsigned int i = 0; i < ipts.size(); ++i)
{
cvLine(img, cvPoint(ipts[i].x,ipts[i].y), cvPoint(km.clusters[ipts[i].clusterIndex].x ,km.clusters[ipts[i].clusterIndex].y),cvScalar(255,255,255));
}
showImage(img);
}
return 0;
}
示例6: mainImage
int mainImage(void)
{
// Declare Ipoints and other stuff
IpVec ipts;
// Make image as a Mat; convert to IplImage for OpenSURF library actions
cv::Mat mimg=cv::imread("OpenSURF/imgs/sf.jpg", CV_LOAD_IMAGE_COLOR);
IplImage iimg=mimg;
IplImage* img=&iimg;
// Detect and describe interest points in the image
clock_t start = clock();
surfDetDes(img, ipts, false, 5, 4, 2, 0.0004f);
clock_t end = clock();
std::cout<< "OpenSURF found: " << ipts.size() << " interest points" << std::endl;
std::cout<< "OpenSURF took: " << float(end - start) / CLOCKS_PER_SEC << " seconds" << std::endl;
// Draw the detected points
drawIpoints(img, ipts);
// Display the result
showImage(img);
return 0;
}
示例7: sizeof
void
linearizeDescriptors(float *dst, const IpVec &ipts)
{
for (size_t i = 0; i < ipts.size(); i++)
{
const Ipoint &p = ipts[i];
std::memcpy(dst + i * 64, p.descriptor, 64 * sizeof(float));
}
}
示例8: disTable
// Build the symmetric pairwise Euclidean-distance matrix of the feature
// points and report the mean pairwise distance through *ave.
// NOTE(review): the original comment says the table is indexed by
// clusterIndex, but it is filled by positional index i/j — confirm the two
// coincide for the callers (EstimateQueue reads it via clusterIndex).
// @param table  output: n x n matrix, table[i][j] = distance(Fea[i], Fea[j])
// @param Fea    feature points
// @param ave    output: average of all n*(n-1)/2 pairwise distances
void CGVM::disTable(vector<vector<float> >& table, IpVec& Fea, double* ave)
{
    const size_t n = Fea.size();
    *ave = 0;

    table.resize(n);
    for (size_t i = 0; i < n; i++)
        table[i].resize(n); // diagonal stays value-initialised to 0

    // Fewer than two points: no pairwise distances. The original underflowed
    // the unsigned bound (Fea.size()-1) here and then divided by num == 0.
    if (n < 2)
        return;

    double num = 0;
    for (size_t i = 0; i + 1 < n; i++)
    {
        for (size_t j = i + 1; j < n; j++)
        {
            const double dx = Fea[i].x - Fea[j].x;
            const double dy = Fea[i].y - Fea[j].y;
            const double d = sqrt(dx * dx + dy * dy); // x*x beats pow(x,2)
            table[i][j] = (float)d;
            table[j][i] = (float)d;
            *ave += d;
            num++;
        }
    }
    *ave /= num;
}
示例9: extract_surf
// Detect SURF keypoints in an image file and return them in one flat
// malloc'd buffer: n records of [x, y, orientation, scale] followed by n
// 64-float descriptors (stored as doubles). Caller owns the buffer (free()).
// On image-load or allocation failure returns NULL and sets nkeypoints to 0.
double* extract_surf(const char* filename, bool upright, int octaves, int intervals, int init_samples, double thres, int& nkeypoints){
    IplImage *img = cvLoadImage(filename);
    if (img == NULL) {
        // Unreadable image: report zero keypoints instead of crashing in surfDetDes.
        nkeypoints = 0;
        return NULL;
    }

    IpVec ipts;
    surfDetDes(img, ipts, upright, octaves, intervals, init_samples, thres);
    nkeypoints = (int)ipts.size();

    double *result = static_cast<double*>(malloc(sizeof(double) * nkeypoints * (4 + 64)));
    if (result == NULL && nkeypoints > 0) {
        cvReleaseImage(&img);
        nkeypoints = 0;
        return NULL;
    }

    double *keypoints   = result;                    // 4 doubles per point
    double *descriptors = result + 4 * nkeypoints;   // then 64 doubles per point
    for (int i = 0; i < nkeypoints; i++) {
        keypoints[i*4]   = ipts[i].x;
        keypoints[i*4+1] = ipts[i].y;
        keypoints[i*4+2] = ipts[i].orientation;
        keypoints[i*4+3] = ipts[i].scale;
        for (int j = 0; j < 64; j++) {
            descriptors[i*64+j] = ipts[i].descriptor[j];
        }
    }

    cvReleaseImage(&img);
    return result;
}
示例10: mainImage
// Demo: run and time upright SURF on a caller-supplied image, then draw and
// display the detected interest points.
int mainImage(IplImage *img)
{
  // Interest points found in the image.
  IpVec interestPts;

  // Time the detection + description pass.
  const clock_t tStart = clock();
  surfDetDes(img, interestPts, true, 5, 4, 2, 0.01f);
  const clock_t tEnd = clock();

  std::cout << "OpenSURF found: " << interestPts.size() << " interest points" << std::endl;
  std::cout << "OpenSURF took: " << float(tEnd - tStart) / CLOCKS_PER_SEC << " seconds" << std::endl;

  // Draw the detected points, then display the result.
  drawIpoints(img, interestPts);
  showImage(img);

  return 0;
}
示例11: init
void init(unsigned char *img,double *refhist,IpVec *refpts,double updt[][9],int w,int h)
{
double obj[8]={0};
unsigned char pix[60*80*3]={0,0};
obj[0]=w/2;obj[1]=h/2;obj[2]=80;obj[3]=60;obj[4]=obj[5]=1;obj[6]=obj[7]=0;
int x=obj[0],y=obj[1],wt=obj[2],ht=obj[3];
int p=0,l=0;
IpVec pt;
IplImage *im;
im=cvCreateImage(cvSize(w,h),IPL_DEPTH_8U,3);
memcpy(im->imageData,img,im->imageSize);
surfDetDes(im,pt,false,5,4,2,0.00004f);
p=0;
for(int k=0;k<pt.size();k++)
{
if((pt.at(k).x>=(w/2-wt/2)) && (pt.at(k).x<=(w/2+wt/2)) &&
(pt.at(k).y>=(h/2-ht/2)) && (pt.at(k).y<=(h/2+ht/2)) )
{
Ipoint tmp;
pt.at(k).x-=(w/2-wt/2);
pt.at(k).y-=(h/2-ht/2);
(*refpts).push_back(pt.at(k));
}
}
for(int i=0;i<N;i++)
{
updt[i][0]=obj[0]+(rand()%60-30);
updt[i][1]=obj[1]+(rand()%60-30);
updt[i][2]=obj[2];
updt[i][3]=obj[3];
updt[i][4]=obj[4];
updt[i][5]=obj[5];
updt[i][6]=obj[6];
updt[i][7]=obj[7];
updt[i][8]=(double)1/N;
img[3*(int)(w*updt[i][1]+updt[i][0])]=0;img[3*(int)(w*updt[i][1]+updt[i
][0])+1]=255;img[3*(int)(w*updt[i][1]+updt[i][0])+1]=0;
}
}
示例12: mainImage
int mainImage(void)
{
// Declare Ipoints and other stuff
IpVec ipts;
IplImage *img=cvLoadImage("Images/img1.jpg");
// Detect and describe interest points in the image
{
surfDetDes(img, ipts, false, 3, 4, 2, 0.0004f);
}
std::cout<< "OpenSURF found: " << ipts.size() << " interest points" << std::endl;
//std::cout<< "OpenSURF took: min/avg/max/stddev " << time_min << "/" << time_avg << "/" << time_max << "/" << stddev
// << std::endl;
// Draw the detected points
drawIpoints(img, ipts);
// Display the result
//showImage(img);
cvSaveImage("result.jpg",img);
return 0;
}
示例13: getMatchesRANSAC
//! Populate IpPairVec with matched ipts using nearest neighbour and RANSAC
Score getMatchesRANSAC(IpVec &ipts1, IpVec &ipts2, IpPairVec &matches)
{
#if RUNSWIFT
#else
timespec matchings, matchinge, verifys, verifye;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &matchings);
#endif
float dist, d1, d2;
Ipoint *match;
float matching_score = 0;
matches.clear();
for(unsigned int i = 0; i < ipts1.size(); i++)
{
ipts1[i].x = ipts1[i].x;
d1 = d2 = FLT_MAX;
match = &ipts2[0]; // to avoid unitialized warning
for(unsigned int j = 0; j < ipts2.size(); j++)
{
ipts2[j].x = ipts2[j].x;
dist = ipts1[i] - ipts2[j];
if(dist<d1) // if this feature matches better than current best
{
d2 = d1;
d1 = dist;
match = &ipts2[j];
}
else if(dist<d2) // this feature matches better than second best
{
d2 = dist;
}
}
// If match has a d1:d2 ratio < 0.75 ipoints are a match
if(d1/d2 < 0.75)
{
// Store the match
matches.push_back(std::make_pair(ipts1[i], *match));
//Increment the matching score
matching_score += 1/d1;
}
}
float best_score = matching_score;
float best_b = -1;
float best_m = -1;
#if RUNSWIFT
#else
Ipoint::totalNumMatches = matches.size();
//At this point we have the total matches before the final number of matches
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &verifys);
#endif
if(matches.size()>1){
best_score = 0;
for(int i=0; i<ITERATIONS; i++){
//Choose random matches
int pos1 = rand() % (int)matches.size();
int pos2 = rand() % (int)matches.size();
while(pos1 == pos2) {
//Make sure that both matches are different
pos2 = rand() % (int)matches.size();
}
//Should generate a positive value
float m = (matches.at(pos2).second.x - matches.at(pos1).second.x)/(matches.at(pos2).first.x - matches.at(pos1).first.x);
//If a gradient is discarded
if (m <= 0){
continue;
}
//Calculate the translation component
float b = matches.at(pos2).second.x - m*matches.at(pos2).first.x;
float score = 0;
for(int j=0; j<(int)matches.size(); j++){
//Calculate the function x_stored,i = b_s * x_test,i + b_d
if( fabs(matches.at(j).second.x - (m*matches.at(j).first.x + b)) < PIXEL_ERROR_MARGIN)
score += 1/fabs(matches.at(j).first - matches.at(j).second);
}
if (score > best_score){
best_score = score;
best_b = b;
best_m = m;
}
}
}
// Now remove all matches who are not within this pixel error margin
//if(best_m > 0){
for(int j=0; j<(int)matches.size(); j++){
if( fabs(matches.at(j).second.x - (best_m*matches.at(j).first.x + best_b)) >= PIXEL_ERROR_MARGIN) {
matches.erase(matches.begin() + j);
j--;
}
}
//.........这里部分代码省略.........
示例14: CGVMcos
bool CGVM::CGVMcos(Point2f& CG,Point2f& CG1,IpVec& ipts0,IpVec& ipts1,IpVec& ipts0x,IpVec& ipts1x, IpVec& ipts0y,IpVec& ipts1y,int loops, int GoodSetNum,double t1,double t2,double& diff)
{
vector<int> TestSet;
vector<bool> GoodPoints;
GoodPoints.resize(ipts0.size());
struct timeval tpstart;
gettimeofday(&tpstart,0);
for(int i=0;i<ipts0.size();++i) GoodPoints[i]=true;
TestSet.resize(100);
bool GoodSet=false;
int SetSize=GoodSetNum-1;
int loop=0;
int loopX=0;
double diffSlope=0;
double sc;
int SUM;
srand(tpstart.tv_usec);
CG=Point2f(0,0);
CG1=Point2f(0,0);
Vector2D Testv,Testv1;
double Testx,Testx1;
bool GetGoodSet=true;
sc=0;
vector<Vector2D> vecVec;
ipts0x.clear();
ipts1x.clear();
ipts0y.clear();
ipts1y.clear();
// for(int i=0;i<ipts0.size();++i)
// {
// Testv=CGVM::GetVector(CG,ipts0[i]);
// Testx=CGVM::GetLength(Testv);
// Testv1=CGVM::GetVector(CG1,ipts1[i]);
// Testx1=CGVM::GetLength(Testv1);
// Vector2D Testk;
// Testk.X=Testv.X-Testv1.X;
// Testk.Y=Testv.Y-Testv1.Y;
// vecVec.push_back(Testk);
// cout<<"x "<<Testk.X<<" y "<<Testv1.Y<<endl;
// }
// CvScalar color_tab[5] =
// { CV_RGB (255, 0, 0), CV_RGB (0, 255, 0), CV_RGB (100, 100, 255), CV_RGB (255, 0, 255), CV_RGB (255, 255, 0) };
// IplImage *img = cvCreateImage (cvSize (5184, 3456), IPL_DEPTH_8U, 3);
// cvZero (img);
// for(int i=0;i<vecVec.size();++i)
// {
// CvPoint ipt;
// ipt.x=vecVec[i].X+2529;
// ipt.y=vecVec[i].Y+1728;
// cvCircle (img, ipt, 5, color_tab[1], CV_FILLED, CV_AA, 0);
// }
// cvSaveImage("z.png",img);
// return false;
while(!GoodSet)
{
SUM=0;
for(int i=0;i<=SetSize;i++)
{
bool noRepeat=false;
while(!noRepeat)
{
TestSet[i]=rand()%ipts0.size();
if(!i) break;
else
{
for(int j=0;j<=SetSize;j++)
{
if(i==j) ;
else if(TestSet[i]==TestSet[j]) break;
else
{
noRepeat=true;
break;
}
}
}
}
//.........这里部分代码省略.........
示例15: extractSURFFeatures
bool BagOfFeatures::extractSURFFeatures(bool invariant,
int octaves,
int intervals,
int step,
float thresh)
{
if(numFeatures > 0)
return false;
int i, j;
int train, valid, test, label;
char fileName[256];
IpVec temp;
IplImage* dataImage = NULL;
descrSize = 64;
for(i = 0; i < numClasses; i++)
{
// Get the distribution of data
data[i].getDataInfo(train, valid, test, label);
// Extrain the features of the training set
// For each training image
for(j = 0; j < train; j++)
{
strcpy(fileName, data[i].getDataList(j));
cout << "Loading training image: " << fileName << endl;
dataImage = cvLoadImage(fileName);
IplImage *dataGray = cvCreateImage(cvSize(dataImage->width, dataImage->height), 8, 1);
// Convert to grayscale
cvCvtColor(dataImage, dataGray, CV_BGR2GRAY);
//Resize the images
IplImage *resized = preProcessImages(dataGray, 75, 150);
// Detect the SURF features
surfDetDes(resized, temp, invariant, octaves, intervals, step, thresh);
cout << "OpenSURF found: " << temp.size() << " interest points" << endl;
// Keep track of the feature count
numFeatures += temp.size();
/*
drawIpoints(resized, temp, 3);
IplImage* display = cvCreateImage(cvSize(resized->width*4, resized->height*4), resized->depth, resized->nChannels);
cvResize(resized, display, CV_INTER_CUBIC);
cvShowImage("Extracted SURF", display);
cvWaitKey(150);
cvReleaseImage(&display);
*/
// Copy the SURF feature into the feature object
copySURFPts(trainObject[i].featureSet[j], temp, descrSize);
cvReleaseImage(&dataImage);
cvReleaseImage(&dataGray);
cvReleaseImage(&resized);
}
// Extrain the features of the validation set
// For each validation image
for(j = 0; j < valid; j++)
{
strcpy(fileName, data[i].getDataList(j+train));
cout << "Loading validation image: " << fileName << endl;
dataImage = cvLoadImage(fileName);
IplImage *dataGray = cvCreateImage(cvSize(dataImage->width, dataImage->height), 8, 1);
// Convert to grayscale
cvCvtColor(dataImage, dataGray, CV_BGR2GRAY);
//Resize the images
IplImage *resized = preProcessImages(dataGray, 75, 150);
// Detect the SURF features
surfDetDes(resized, temp, invariant, octaves, intervals, step, thresh);
cout << "OpenSURF found: " << temp.size() << " interest points" << endl;
/*
drawIpoints(resized, temp, 3);
IplImage* display = cvCreateImage(cvSize(resized->width*4, resized->height*4), resized->depth, resized->nChannels);
cvResize(resized, display, CV_INTER_CUBIC);
cvShowImage("Extracted SURF", display);
cvWaitKey(150);
cvReleaseImage(&display);
*/
// Copy the SURF feature into the feature object
copySURFPts(validObject[i].featureSet[j], temp, descrSize);
cvReleaseImage(&dataImage);
cvReleaseImage(&dataGray);
cvReleaseImage(&resized);
}
// Extrain the features of the test set
//.........这里部分代码省略.........