This article collects typical usage examples of the C++ method TickMeter::getTimeSec. If you are struggling with questions such as: what exactly does C++ TickMeter::getTimeSec do? How is TickMeter::getTimeSec used? What do real calls to TickMeter::getTimeSec look like? then the curated code examples below may help. You can also explore further usage examples of the enclosing class, TickMeter.
The following presents 13 code examples of the TickMeter::getTimeSec method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
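Before the full examples, here is a minimal sketch of the usual start/stop/getTimeSec pattern (assuming OpenCV's cv::TickMeter, which recent OpenCV releases declare in opencv2/core/utility.hpp; in older 2.x releases the class shipped with the samples). The body between start() and stop() stands in for whatever you want to time:

#include <opencv2/core/utility.hpp> // cv::TickMeter
#include <iostream>

int main()
{
    cv::TickMeter tm;
    tm.start();
    // ... code to be timed ...
    tm.stop();
    // getTimeSec() reports the accumulated wall-clock time in seconds;
    // start()/stop() may be called repeatedly, and getCounter() returns
    // the number of timed intervals (see Example 9 below).
    std::cout << "Elapsed: " << tm.getTimeSec() << " sec" << std::endl;
    return 0;
}

The examples that follow wrap exactly this pattern around detection, odometry, and matching calls.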
Example 1: detectAndDrawObjects
void detectAndDrawObjects( Mat& image, LatentSvmDetector& detector, const vector<Scalar>& colors, float overlapThreshold, int numThreads )
{
vector<LatentSvmDetector::ObjectDetection> detections;
TickMeter tm;
tm.start();
detector.detect( image, detections, overlapThreshold, numThreads);
tm.stop();
cout << "Detection time = " << tm.getTimeSec() << " sec" << endl;
const vector<string> classNames = detector.getClassNames();
CV_Assert( colors.size() == classNames.size() );
for( size_t i = 0; i < detections.size(); i++ )
{
const LatentSvmDetector::ObjectDetection& od = detections[i];
rectangle( image, od.rect, colors[od.classID], 3 );
}
// put text over all the rectangles
for( size_t i = 0; i < detections.size(); i++ )
{
const LatentSvmDetector::ObjectDetection& od = detections[i];
putText( image, classNames[od.classID], Point(od.rect.x+4,od.rect.y+13), FONT_HERSHEY_SIMPLEX, 0.55, colors[od.classID], 2 );
}
}
Example 2: getFeaturePyramid
int getFeaturePyramid(IplImage * image, CvLSVMFeaturePyramid **maps,
const int bx, const int by)
{
IplImage *imgResize;
float step;
unsigned int numStep;
unsigned int maxNumCells;
unsigned int W, H;
if (image->depth == IPL_DEPTH_32F)
{
imgResize = image;
}
else
{
imgResize = cvCreateImage(cvSize(image->width, image->height),
IPL_DEPTH_32F, 3);
cvConvert(image, imgResize);
}
W = imgResize->width;
H = imgResize->height;
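// scale factor between adjacent pyramid levels: 2^(1/Lambda)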
step = powf(2.0f, 1.0f / ((float) Lambda));
maxNumCells = W / Side_Length;
if (maxNumCells > H / Side_Length)
{
maxNumCells = H / Side_Length;
}
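// number of scale steps until the smaller image dimension shrinks to roughly 5 cells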
numStep = (int) (logf((float) maxNumCells / (5.0f)) / logf(step)) + 1;
allocFeaturePyramidObject(maps, numStep + Lambda);
#ifdef PROFILE
TickMeter tm;
tm.start();
cout << "(featurepyramid.cpp)getPathOfFeaturePyramid START " << endl;
#endif
uploadImageToGPU1D(imgResize);
getPathOfFeaturePyramidGPUStream(imgResize, step, Lambda, 0,
Side_Length / 2, bx, by, maps);
getPathOfFeaturePyramidGPUStream(imgResize, step, numStep, Lambda,
Side_Length, bx, by, maps);
cleanImageFromGPU1D();
#ifdef PROFILE
tm.stop();
cout << "(featurepyramid.cpp)getPathOfFeaturePyramid END time = "
<< tm.getTimeSec() << " sec" << endl;
#endif
if (image->depth != IPL_DEPTH_32F)
{
cvReleaseImage(&imgResize);
}
return LATENT_SVM_OK;
}
Example 3: main
//......... part of the code is omitted here .........
Mat colorImage0 = imread( argv[1] );
Mat depth0 = imread( argv[2], -1 );
Mat colorImage1 = imread( argv[3] );
Mat depth1 = imread( argv[4], -1 );
if( colorImage0.empty() || depth0.empty() || colorImage1.empty() || depth1.empty() )
{
cout << "Data (rgb or depth images) is empty.";
return -1;
}
int transformationType = RIGID_BODY_MOTION;
if( argc == 6 )
{
string ttype = argv[5];
if( ttype == "-rbm" )
{
transformationType = RIGID_BODY_MOTION;
}
else if ( ttype == "-r")
{
transformationType = ROTATION;
}
else if ( ttype == "-t")
{
transformationType = TRANSLATION;
}
else
{
cout << "Unsupported transformation type." << endl;
return -1;
}
}
Mat grayImage0, grayImage1, depthFlt0, depthFlt1/*in meters*/;
cvtColor( colorImage0, grayImage0, COLOR_BGR2GRAY );
cvtColor( colorImage1, grayImage1, COLOR_BGR2GRAY );
depth0.convertTo( depthFlt0, CV_32FC1, 1./1000 );
depth1.convertTo( depthFlt1, CV_32FC1, 1./1000 );
TickMeter tm;
Mat Rt;
vector<int> iterCounts(4);
iterCounts[0] = 7;
iterCounts[1] = 7;
iterCounts[2] = 7;
iterCounts[3] = 10;
vector<float> minGradMagnitudes(4);
minGradMagnitudes[0] = 12;
minGradMagnitudes[1] = 5;
minGradMagnitudes[2] = 3;
minGradMagnitudes[3] = 1;
const float minDepth = 0.f; //in meters
const float maxDepth = 4.f; //in meters
const float maxDepthDiff = 0.07f; //in meters
tm.start();
/*
bool isFound = cv::RGBDOdometry( Rt, Mat(),
grayImage0, depthFlt0, Mat(),
grayImage1, depthFlt1, Mat(),
cameraMatrix, minDepth, maxDepth, maxDepthDiff,
iterCounts, minGradMagnitudes, transformationType );
*/
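// average over many identical runs to smooth out timing noise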
int repeat = 50;
bool isFound = false;
for (int i=0; i<repeat; ++i)
isFound = RGBDOdometry418( Rt, Mat(),
grayImage0, depthFlt0, Mat(),
grayImage1, depthFlt1, Mat(),
cameraMatrix, minDepth, maxDepth, maxDepthDiff,
iterCounts, minGradMagnitudes, transformationType );
tm.stop();
cout << "Rt = " << Rt << endl;
cout << "Time = " << tm.getTimeSec()/repeat << " sec." << endl;
if( !isFound )
{
cout << "Rigid body motion cann't be estimated for given RGBD data." << endl;
return -1;
}
Mat warpedImage0;
warpImage<Point3_<uchar> >( colorImage0, depthFlt0, Rt, cameraMatrix, distCoeff, warpedImage0 );
imshow( "image0", colorImage0 );
imshow( "warped_image0", warpedImage0 );
imwrite("warped.png", warpedImage0);
imshow( "image1", colorImage1 );
waitKey();
return 0;
}
Example 4: main
//......... part of the code is omitted here .........
siftsim[ID] = similarities::compareDescriptors(siftmap[ID], imgSiftDescriptors);
orbsim[ID] = 0;//similarities::compareDescriptors(orbmap[ID], imgOrbDescriptors);
fastsim[ID] = 0;//similarities::compareDescriptors(fastmap[ID], imgFastDescriptors);
}
map<vector<float>, int> top;
bool gotone = false;
typedef map<vector<float>, int>::iterator iter;
// Choose the best ones!
for (map<vector<float>, Mat>::iterator i = imagemap.begin(); i != imagemap.end(); ++i)
{
vector<float> ID = i->first;
int sim = /* gssim[ID] + 0.5*bwsim[ID] + */ 5*surfsim[ID] + 0.3*siftsim[ID] + orbsim[ID] + fastsim[ID];
// cout << surfsim[ID] << "\t";
// cout << siftsim[ID] << "\t";
// cout << orbsim[ID] << "\t";
// cout << fastsim[ID] << endl;
if (!gotone)
{
top[ID] = sim;
gotone = true;
}
iter it = top.begin();
iter end = top.end();
int max_value = it->second;
vector<float> max_ID = it->first;
for( ; it != end; ++it)
{
int current = it->second;
if(current > max_value)
{
max_value = it->second;
max_ID = it->first;
}
}
// cout << "Sim: " << sim << "\tmax_value: " << max_value << endl;
if (top.size() < numtoreturn)
top[ID] = sim;
else
{
if (sim < max_value)
{
top[ID] = sim;
top.erase(max_ID);
}
}
}
tm.stop();
double s = tm.getTimeSec();
cout << ">\n<\n Writing top " << numtoreturn << " images ..." << endl;
int count = 1;
namedWindow("Image");
namedWindow("Match");
namedWindow("ImageBW");
namedWindow("MatchBW");
namedWindow("ImageGS");
namedWindow("MatchGS");
imshow("Image", image);
imshow("ImageBW", bwimage);
imshow("ImageGS", gsimage);
vector<KeyPoint> currentPoints;
for (iter i = top.begin(); i != top.end(); ++i)
{
vector<float> ID = i->first;
cout << " Score: "<< i->second << "\tGrayScale: " << gssim[ID] << "\tBW: " << bwsim[ID] << " \tSURF: " << surfsim[ID] << "\tSIFT: " << siftsim[ID] << endl;
string fn = "Sim_" + boost::to_string(count) + "_" + boost::to_string(i->second) + ".png";
imwrite(fn, imagemap[ID]);
count++;
normalize(bwmap[ID], bwmap[ID], 0, 255, NORM_MINMAX, CV_64F);
normalize(gsmap[ID], gsmap[ID], 0, 255, NORM_MINMAX, CV_64F);
imshow("Match", imagemap[ID]);
imshow("MatchBW", bwmap[ID]);
imshow("MatchGS", gsmap[ID]);
waitKey(0);
}
cout << ">\nComparisons took " << s << " seconds for " << imagemap.size() << " images ("
<< (int) imagemap.size()/s << " images per second)." << endl;
return 0;
}
Example 5: bingQdpmRocTest
//......... part of the code is omitted here .........
for (int k = 0; k < gtNumCrnt; k++) {
double matchScore = DataSetVOC::interUnio(bb, boxesGT[k]);
if (matchScore > maxMatchScore) {
maxMatchScore = matchScore;
maxMatchId = k;
}
}
uchar match = maxMatchScore > 0.5 ? 1 : 0;
if (match) {
int preDetectedIdx = gtIdx[maxMatchId];
if (preDetectedIdx >= 0) {
if (maxMatchScore > di[preDetectedIdx].matchScore) {
di[preDetectedIdx].matched = false;
gtIdx[maxMatchId] = int(j);
di[j].matchScore = maxMatchScore;
di[j].matched = true;
}
} else {
gtIdx[maxMatchId] = int(j);
di[j].matchScore = maxMatchScore;
di[j].matched = true;
}
}
#ifdef SAVE_IMAGE_RESULT
// save the result image
char buf[256];
sprintf(buf, "%2f", od.score);
Point pt(max((bb[2] + bb[0] - 85) / 2, 0), (bb[1] + bb[3]) / 2);
putText(image, buf, pt, FONT_HERSHEY_SIMPLEX, 0.5, Scalar::all(255), 3, CV_AA);
putText(image, buf, pt, FONT_HERSHEY_SIMPLEX, 0.5, Scalar::all(0), 1, CV_AA);
rectangle(image, od.rect, cv::Scalar(0, 255, 0), 2);
#endif
}
for (size_t j = 0; j < detectCount; j++) { // detections are sorted in descending order
const QUniLsvmDetector::ObjectDetection& od = detections[j];
if (di[j].matched)
matchCount++;
pScores.push_back(ScoreTp(od.score, di[j].matched));
}
#ifdef SAVE_IMAGE_RESULT
imwrite((voc.testSet[i] + "_DpmResult.png").c_str(), image);
#endif
printf("%d ", i + 1);
}
printf("\n");
}
printf("BingQdpmRocTest time = %f sec\n", tm.getTimeSec());
printf("GT %d, Matched %d/%d \n", personCount, matchCount, pScores.size());
// Calculate log-average miss rate
stable_sort(begin(pScores), end(pScores),
[](const ScoreTp &p1, const ScoreTp &p2) { return p1.first > p2.first; });
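// Build the ROC inputs from the score-sorted detections: fp gets 1 per
// unmatched (false-positive) detection; tp is the cumulative match count
// normalized by the ground-truth count (i.e. recall); fp is then
// accumulated and normalized per image to give false positives per image.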
vector<float> fp(pScores.size());
for (size_t i = 0; i < fp.size(); i++)
fp[i] = !pScores[i].second;
vector<float> tp(pScores.size());
tp[0] = pScores[0].second;
for (size_t i = 1; i < tp.size(); i++)
tp[i] = tp[i - 1] + pScores[i].second;
for (size_t i = 0; i < tp.size(); i++)
tp[i] /= personCount;
for (size_t i = 1; i < fp.size(); i++)
fp[i] += fp[i - 1];
for (size_t i = 0; i < fp.size(); i++)
fp[i] /= imageCount;
sprintf(buf, "%s%03d_%03.fms_6137gt_%04dtp_%04ddt_%.0fs.m",
METHOD_NAME, max(windowLimit, 0), timeLimitMs, matchCount, pScores.size(), tm.getTimeSec());
FILE *matlabFile = fopen(buf, "w");
printVector(matlabFile, tp, "tp");
printVector(matlabFile, fp, "fp");
const char *matlabContent = "tp = tp'; fp = fp';\n"
"addpath(genpath('d:/81_dataset/03_Caltech/piotr_toolbox/')); savepath;\n"
"xs1=[-inf; fp];\n"
"ys1=[0; tp];\n"
"ref=10.^(-2:.25:0);\n"
"lims=[3.1e-4 1e1 .5 1];\n"
"m=length(ref);\n"
"for i=1:m\n"
"\tj=find(xs1<=ref(i));\n"
"\tmiss(i)=ys1(j(end));\n"
"end\n"
"miss=exp(mean(log(max(1e-10,1-miss))));\n"
"show=figure();\n"
"plotRoc([fp tp],'logx',1,'logy',1,'xLbl','fppi',...\n"
"\t'lims',lims,'color','g','smooth',1,'fpTarget',ref);\n"
"title(sprintf('log-average miss rate = %.2f%%',miss*100));\n"
"savefig(['MORU' 'Roc'],show,'png');\n";
fwrite(matlabContent, strlen(matlabContent), 1, matlabFile);
fclose(matlabFile);
}
Example 6: main
//......... part of the code is omitted here .........
{
cout << "Can not create Odometry algorithm. Check the passed odometry name." << endl;
return -1;
}
odometry->set("cameraMatrix", cameraMatrix);
TickMeter gtm;
int count = 0;
for(int i = 0; !file.eof(); i++)
{
string str;
std::getline(file, str);
if(str.empty()) break;
if(str.at(0) == '#') continue; /* comment */
Mat image, depth;
// Read one pair (rgb and depth)
// example: 1305031453.359684 rgb/1305031453.359684.png 1305031453.374112 depth/1305031453.374112.png
#if BILATERAL_FILTER
TickMeter tm_bilateral_filter;
#endif
{
string rgbFilename = str.substr(timestampLength + 1, rgbPathLehgth );
string timestamp = str.substr(0, timestampLength);
string depthFilename = str.substr(2*timestampLength + rgbPathLehgth + 3, depthPathLehgth );
image = imread(dirname + rgbFilename);
depth = imread(dirname + depthFilename, -1);
CV_Assert(!image.empty());
CV_Assert(!depth.empty());
CV_Assert(depth.type() == CV_16UC1);
cout << i << " " << rgbFilename << " " << depthFilename << endl;
// scale depth
Mat depth_flt;
depth.convertTo(depth_flt, CV_32FC1, 1.f/5000.f);
#if not BILATERAL_FILTER
depth_flt.setTo(std::numeric_limits<float>::quiet_NaN(), depth == 0);
depth = depth_flt;
#else
tm_bilateral_filter.start();
depth = Mat(depth_flt.size(), CV_32FC1, Scalar(0));
const double depth_sigma = 0.03;
const double space_sigma = 4.5; // in pixels
Mat invalidDepthMask = depth_flt == 0.f;
depth_flt.setTo(-5*depth_sigma, invalidDepthMask);
bilateralFilter(depth_flt, depth, -1, depth_sigma, space_sigma);
depth.setTo(std::numeric_limits<float>::quiet_NaN(), invalidDepthMask);
tm_bilateral_filter.stop();
cout << "Time filter " << tm_bilateral_filter.getTimeSec() << endl;
#endif
timestamps.push_back( timestamp );
}
{
Mat gray;
cvtColor(image, gray, CV_BGR2GRAY);
frame_curr->image = gray;
frame_curr->depth = depth;
Mat Rt;
if(!Rts.empty())
{
TickMeter tm;
tm.start();
gtm.start();
bool res = odometry->compute(frame_curr, frame_prev, Rt);
gtm.stop();
tm.stop();
count++;
cout << "Time " << tm.getTimeSec() << endl;
#if BILATERAL_FILTER
cout << "Time ratio " << tm_bilateral_filter.getTimeSec() / tm.getTimeSec() << endl;
#endif
if(!res)
Rt = Mat::eye(4,4,CV_64FC1);
}
if( Rts.empty() )
Rts.push_back(Mat::eye(4,4,CV_64FC1));
else
{
Mat& prevRt = *Rts.rbegin();
cout << "Rt " << Rt << endl;
Rts.push_back( prevRt * Rt );
}
if(!frame_prev.empty())
frame_prev->release();
std::swap(frame_prev, frame_curr);
}
}
std::cout << "Average time " << gtm.getTimeSec()/count << std::endl;
writeResults(argv[2], timestamps, Rts);
return 0;
}
Example 7: main
//......... part of the code is omitted here .........
for (int k = 0; k < gtNumCrnt; k++) {
double matchScore = DataSetVOC::interUnio(bb, boxesGT[k]);
if (matchScore > maxMatchScore) {
maxMatchScore = matchScore;
maxMatchId = k;
}
}
uchar match = maxMatchScore > 0.5 ? 1 : 0;
if (match) {
int preDetectedIdx = gtIdx[maxMatchId];
if (preDetectedIdx >= 0) {
if (maxMatchScore > di[preDetectedIdx].matchScore) {
di[preDetectedIdx].matched = false;
gtIdx[maxMatchId] = int(j);
di[j].matchScore = maxMatchScore;
di[j].matched = true;
}
} else {
gtIdx[maxMatchId] = int(j);
di[j].matchScore = maxMatchScore;
di[j].matched = true;
}
}
}
for (size_t j = 0; j < detectCount; j++) {
const QUniLsvmDetector::ObjectDetection& od = detections[j];
if (di[j].matched)
matchCount++;
pScores.push_back(ScoreTp(od.score, di[j].matched));
}
#ifdef SAVE_YML
vector<Vec4i> newBoxesGT;
for (size_t j = 0; j < gtIdx.size(); j++) {
if (gtIdx[j] < 0)
continue;
newBoxesGT.push_back(boxesGT[j]);
}
if (newBoxesGT.size() > 0) {
// name
// h, w
string name = _S(voc.testSet[i]);
writePersonYml(name, image.rows, image.cols, newBoxesGT);
}
#endif
}
printf("QdpmRocTest time = %f sec\n", tm.getTimeSec());
printf("GT %d, Matched %d/%d \n", personCount, matchCount, pScores.size());
// Calculate log-average miss rate
stable_sort(begin(pScores), end(pScores),
[](const ScoreTp &p1, const ScoreTp &p2) { return p1.first > p2.first; });
vector<float> fp(pScores.size());
for (size_t i = 0; i < fp.size(); i++)
fp[i] = !pScores[i].second;
vector<float> tp(pScores.size());
tp[0] = pScores[0].second;
for (size_t i = 1; i < tp.size(); i++)
tp[i] = tp[i - 1] + pScores[i].second;
for (size_t i = 0; i < tp.size(); i++)
tp[i] /= personCount;
for (size_t i = 1; i < fp.size(); i++)
fp[i] += fp[i - 1];
for (size_t i = 0; i < fp.size(); i++)
fp[i] /= imageCount;
FILE *matlabFile = fopen("drawLAMR.m", "w");
printVector(matlabFile, tp, "tp");
printVector(matlabFile, fp, "fp");
const char *matlabContent = "tp = tp'; fp = fp';\n"
"addpath(genpath('d:/81_dataset/03_Caltech/piotr_toolbox/')); savepath;\n"
"xs1=[-inf; fp];\n"
"ys1=[0; tp];\n"
"ref=10.^(-2:.25:0);\n"
"lims=[3.1e-4 1e1 0. 1];\n"
"m=length(ref);\n"
"for i=1:m\n"
"\tj=find(xs1<=ref(i));\n"
"\tmiss(i)=ys1(j(end));\n"
"end\n"
"miss=exp(mean(log(max(1e-10,1-miss))));\n"
"show=figure();\n"
"plotRoc([fp tp],'logx',1,'logy',1,'xLbl','fppi',...\n"
"\t'lims',lims,'color','g','smooth',1,'fpTarget',ref);\n"
"title(sprintf('log-average miss rate = %.2f%%',miss*100));\n"
"savefig(['MORU' 'Roc'],show,'png');\n";
fwrite(matlabContent, strlen(matlabContent), 1, matlabFile);
fclose(matlabFile);
return 0;
}
Example 8: main
//......... part of the code is omitted here .........
}
}
silhoutteDistance += minDist;
}
silhoutteDistance /= silhouette.size();
chamferDistances.push_back(silhoutteDistance);
}
std::sort(chamferDistances.begin(), chamferDistances.end());
if (chamferDistances.empty())
{
chamferDistances.push_back(std::numeric_limits<float>::max());
}
cout << "Best geometric hashing pose (px): " << chamferDistances[0] << endl;
cout << "Number of initial poses: " << chamferDistances.size() << endl;
allChamferDistances.push_back(chamferDistances[0]);
geometricHashingPoseCount.push_back(chamferDistances.size());
}
if (poses_cam.size() == 0)
{
++segmentationFailuresCount;
continue;
}
if (!posesQualities.empty())
{
std::vector<float>::iterator bestDetection = std::min_element(posesQualities.begin(), posesQualities.end());
int bestDetectionIndex = std::distance(posesQualities.begin(), bestDetection);
// int detectedObjectIndex = detector.getTrainObjectIndex(detectedObjectsNames[bestDetectionIndex]);
// indicesOfRecognizedObjects.push_back(detectedObjectIndex);
cout << "Recognized object: " << detectedObjectsNames[bestDetectionIndex] << endl;
}
cout << "Recognition time: " << recognitionTime.getTimeSec() << "s" << endl;
allRecognitionTimes.push_back(recognitionTime.getTimeSec());
if (objectNames.size() == 1)
{
cout << "initial poses: " << debugInfo.initialPoses.size() << endl;
vector<PoseError> initialPoseErrors;
for (size_t i = 0 ; i < debugInfo.initialPoses.size(); ++i)
{
PoseError poseError;
evaluatePoseWithRotation(edgeModels[0], debugInfo.initialPoses[i], model2test_ground, poseError);
cout << poseError << endl;
initialPoseErrors.push_back(poseError);
// showEdgels(kinectBgrImage, edgeModels[0].points, debugInfo.initialPoses[i], kinectCamera, "gh pose");
// waitKey();
}
cout << "the end." << endl;
PoseError currentBestInitialError;
{
vector<PoseError>::iterator bestPoseIt = std::min_element(initialPoseErrors.begin(), initialPoseErrors.end());
int bestPoseIdx = std::distance(initialPoseErrors.begin(), bestPoseIt);
currentBestInitialError = initialPoseErrors[bestPoseIdx];
cout << "Best initial error: " << currentBestInitialError << endl;
bestInitialPoses.push_back(currentBestInitialError);
}
CV_Assert(poses_cam.size() == 1);
int objectIndex = 0;
initialPoseCount.push_back(poses_cam.size());
vector<PoseError> currentPoseErrors(poses_cam.size());
for (size_t i = 0 ; i < poses_cam.size(); ++i)
Developer ID: alexander-hagg, Project: object_recognition_transparent_objects-release, Lines: 67, Source: transparentExperiments.cpp
Example 9: SuperRe
//......... part of the code is omitted here .........
}
cc++;
}
}*/
//higres = Values(Rect(index, 0, 1, 25))*result;
//if (r<20&&c<21)
//{
char temps[20];
sprintf(temps, "%d ", index);
string s(temps);
fout << s << " ";
/*}*/
}
/*if (r < 20)
{*/
fout << endl;
/*}*/
}
/*************************************/
/*WriteFile(3025, 174, data,OUTPUT);
for (int i = 0; i <3025; i++){
delete[]data[i];
}
delete[]data;*/
/*************************************/
Mat final, finalresult;
imwrite(TEST + "addOriginalResult.png", Result*255.0);
add(lowResImg, Result, Result);
imwrite(TEST + "addResult.png", Result*255.0);
CrCb[0] = Result;
merge(CrCb, finalresult);
cvtColor(finalresult, finalMerge, CV_YCrCb2BGR);
imwrite(TEST + "addfinalResult.png", finalMerge*255.0);
tm.stop();
cout << "count=" << tm.getCounter() << ",process time=" << tm.getTimeSec() << endl;
Sleep(5000);
/*******************test data*********/
Mat Finaltest = finalMerge*255.0;
vector<Mat> testInput;
Mat* inputest = new Mat[traiNo];
for (int i = 0; i < traiNo; i++)
{
inputest[i] = imread(TRAINFILENAME[i],1);
}
for (int i = 0; i < traiNo; i++)
{
Mat a = Mat::zeros(Finaltest.rows, Finaltest.cols + inputest[i].cols, CV_32FC3);
Finaltest.copyTo(a(Rect(0, 0, Finaltest.cols, Finaltest.rows)));
inputest[i].convertTo(inputest[i], CV_32FC3);
inputest[i].copyTo(a(Rect(Finaltest.cols, 0, inputest[i].cols, inputest[i].rows)));
testInput.push_back(a);
}
int *sumtemp = new int[traiNo];
for (int n = 0; n < traiNo; n++)
{
sumtemp[n] = 0;
}
int width = inputImage.cols, height = inputImage.rows;
map<int, Point2d>::iterator itest;
itest = testPoint.begin();
for (it_map = vectorList.begin(); it_map != vectorList.end(); it_map++)
{
int index = it_map->first;
Point2d a = itest->second;
int pic = InWhichPic(index);
sumtemp[pic]++;
Point2d point = Valuse[index];
Point b = Point(a.x + 5, a.y + 5);
Point a1 = Point(point.x + 1 + inputImg.cols, point.y + 1);
Point b1 = Point(point.x + 5 + inputImg.cols, point.y + 5);
rectangle(testInput[pic], a, b, Scalar(a.y * 255 / height, 0, a.x * 255 / width), 1, 8);
rectangle(testInput[pic], a1, b1, Scalar(a.y * 255 / height, 0, a.x * 255 / width), 1, 8);
//line(testInput[pic], a, a1, Scalar(255, 0, 0), 0.1, 8);
itest++;
}
for (int i = 0; i < traiNo; i++)
{
char temps[20];
sprintf(temps, "test_%d.png", i);
string s(temps);
imwrite(TEST + temps, testInput[i]);
}
WriteINTFile(inputImage.rows, inputImage.cols, drawindex, INDEX);
WritePOINTFile(inputImage.rows, inputImage.cols, draw, POINTfile);
for (int n = 0; n < traiNo; n++)
{
fout << sumtemp[n] << endl;
}
fout.close();
/******************test data**********/
//CrCb.clear();
vectorList.clear();
/*cvMerge(Y, Cr, Cb, NULL, frame)
cvtColor(frame, frame, CV_YCrCb2BGR)*/
}
Example 10: main
int main( int argc, const char **argv )
{
CommandLineParser parser( argc, argv, keys );
parser.about( "Global Patch Collider evaluation tool" );
if ( parser.has( "help" ) )
{
parser.printMessage();
return 0;
}
String fromPath = parser.get< String >( 0 );
String toPath = parser.get< String >( 1 );
String gtPath = parser.get< String >( 2 );
String outPath = parser.get< String >( 3 );
const bool useOpenCL = parser.has( "gpu" );
String forestDumpPath = parser.get< String >( "forest" );
if ( !parser.check() )
{
parser.printErrors();
return 1;
}
if ( !fileProbe( forestDumpPath.c_str() ) )
{
std::cerr << "Can't open the file with a trained model: `" << forestDumpPath
<< "`.\nYou can obtain this file either by manually training the model using another tool with *_train suffix or by "
"downloading one of the files trained on some publicly available dataset from "
"here:\nhttps://drive.google.com/open?id=0B7Hb8cfuzrIIZDFscXVYd0NBNFU"
<< std::endl;
return 1;
}
ocl::setUseOpenCL( useOpenCL );
Ptr< optflow::GPCForest< nTrees > > forest = Algorithm::load< optflow::GPCForest< nTrees > >( forestDumpPath );
Mat from = imread( fromPath );
Mat to = imread( toPath );
Mat gt = optflow::readOpticalFlow( gtPath );
std::vector< std::pair< Point2i, Point2i > > corr;
TickMeter meter;
meter.start();
forest->findCorrespondences( from, to, corr, optflow::GPCMatchingParams( useOpenCL ) );
meter.stop();
std::cout << "Found " << corr.size() << " matches." << std::endl;
std::cout << "Time: " << meter.getTimeSec() << " sec." << std::endl;
double error = 0;
Mat dispErr = Mat::zeros( from.size(), CV_32FC3 );
dispErr = Scalar( 0, 0, 1 );
Mat disp = Mat::zeros( from.size(), CV_32FC3 );
disp = Scalar( 0, 0, 1 );
for ( size_t i = 0; i < corr.size(); ++i )
{
const Point2f a = corr[i].first;
const Point2f b = corr[i].second;
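// c is where a should land under the ground-truth flow; the endpoint error is ||b - c||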
const Point2f c = a + gt.at< Point2f >( corr[i].first.y, corr[i].first.x );
error += normL2( b - c );
circle( disp, a, 3, getFlowColor( b - a ), -1 );
circle( dispErr, a, 3, getFlowColor( b - c, false, 32 ), -1 );
}
error /= corr.size();
std::cout << "Average endpoint error: " << error << " px." << std::endl;
cvtColor( disp, disp, COLOR_HSV2BGR );
cvtColor( dispErr, dispErr, COLOR_HSV2BGR );
Mat dispGroundTruth;
displayFlow( gt, dispGroundTruth );
if ( outPath.length() )
{
putText( disp, "Sparse matching: Global Patch Collider", Point2i( 24, 40 ), FONT_HERSHEY_DUPLEX, 1, Vec3b( 1, 0, 0 ), 2, LINE_AA );
char buf[256];
sprintf( buf, "Average EPE: %.2f", error );
putText( disp, buf, Point2i( 24, 80 ), FONT_HERSHEY_DUPLEX, 1, Vec3b( 1, 0, 0 ), 2, LINE_AA );
sprintf( buf, "Number of matches: %u", (unsigned)corr.size() );
putText( disp, buf, Point2i( 24, 120 ), FONT_HERSHEY_DUPLEX, 1, Vec3b( 1, 0, 0 ), 2, LINE_AA );
disp *= 255;
imwrite( outPath, disp );
return 0;
}
namedWindow( "Correspondences", WINDOW_AUTOSIZE );
imshow( "Correspondences", disp );
namedWindow( "Error", WINDOW_AUTOSIZE );
imshow( "Error", dispErr );
namedWindow( "Ground truth", WINDOW_AUTOSIZE );
imshow( "Ground truth", dispGroundTruth );
waitKey( 0 );
return 0;
//......... part of the code is omitted here .........
Example 11: process
Mat Tracker::process(const Mat frame, Stats& stats)
{
TickMeter tm;
vector<KeyPoint> kp;
Mat desc;
tm.start();
detector->detectAndCompute(frame, noArray(), kp, desc);
stats.keypoints = (int)kp.size();
vector< vector<DMatch> > matches;
vector<KeyPoint> matched1, matched2;
matcher->knnMatch(first_desc, desc, matches, 2);
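// Lowe's ratio test: keep a match only if the best neighbour is clearly closer than the second-best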
for(unsigned i = 0; i < matches.size(); i++) {
if(matches[i][0].distance < nn_match_ratio * matches[i][1].distance) {
matched1.push_back(first_kp[matches[i][0].queryIdx]);
matched2.push_back( kp[matches[i][0].trainIdx]);
}
}
stats.matches = (int)matched1.size();
Mat inlier_mask, homography;
vector<KeyPoint> inliers1, inliers2;
vector<DMatch> inlier_matches;
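// a homography needs at least 4 correspondences; RANSAC fills inlier_mask with the consistent ones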
if(matched1.size() >= 4) {
homography = findHomography(Points(matched1), Points(matched2),
RANSAC, ransac_thresh, inlier_mask);
}
tm.stop();
stats.fps = 1. / tm.getTimeSec();
if(matched1.size() < 4 || homography.empty()) {
Mat res;
hconcat(first_frame, frame, res);
stats.inliers = 0;
stats.ratio = 0;
return res;
}
for(unsigned i = 0; i < matched1.size(); i++) {
if(inlier_mask.at<uchar>(i)) {
int new_i = static_cast<int>(inliers1.size());
inliers1.push_back(matched1[i]);
inliers2.push_back(matched2[i]);
inlier_matches.push_back(DMatch(new_i, new_i, 0));
}
}
stats.inliers = (int)inliers1.size();
stats.ratio = stats.inliers * 1.0 / stats.matches;
vector<Point2f> new_bb;
perspectiveTransform(object_bb, new_bb, homography);
Mat frame_with_bb = frame.clone();
if(stats.inliers >= bb_min_inliers) {
drawBoundingBox(frame_with_bb, new_bb);
}
Mat res;
drawMatches(first_frame, inliers1, frame_with_bb, inliers2,
inlier_matches, res,
Scalar(255, 0, 0), Scalar(255, 0, 0));
return res;
}
Example 12: run
void TaskManager::run(string groundTruthFile)
{
//run on the train data or on the test data
bool useGroundTruth = !groundTruthFile.empty();
cout << "Processing from " << from << " to " << to << endl << endl;
answers.clear();
Answers::type rightAnswers;
if (useGroundTruth)
{
Answers::loadAnswers(groundTruthFile, rightAnswers);
const int inlierLabel = 0;
vector<int> fullPanoramaMask(images_count, inlierLabel);
for (int i = from; i <= to; ++i)
if (rightAnswers.find(i) == rightAnswers.end())
rightAnswers.insert(make_pair(i, fullPanoramaMask));
}
int total = to - from + 1;
vector < vector<KruskalGrouper::Grouping> > groupings(total);
float minIncorrectDistance = std::numeric_limits<float>::max();
int minIncorrectDistanceSeriaIdx = -1;
#pragma omp parallel for schedule(dynamic, 5)
for (int i = from; i <= to; ++i)
{
cout << "Seria #" << i << "\t" << endl;
TickMeter time;
time.start();
OnePanoSolver solver(folder, i, cache_folder);
Mat diff;
#if 0 // cross-check only
bool found = solver.launch(groupings[i - from], diff, int());
#else
solver.launch(groupings[i - from], diff);
#endif
time.stop();
cout << "Time: " << time.getTimeSec() << "s" << endl;
if (useGroundTruth)
{
vector<int> right = rightAnswers[i];
for (int j = 0; j < diff.rows; j++)
{
for (int k = j + 1; k < diff.cols; k++)
{
if (right[j] != right[k])
{
CV_Assert(diff.type() == CV_32FC1);
if (diff.at<float> (j, k) <= minIncorrectDistance)
{
minIncorrectDistance = diff.at<float> (j, k);
minIncorrectDistanceSeriaIdx = i;
}
}
}
}
}
}
int bestScore = -1;
double bestThreshold = -1;
const int minPanoSize = 3;
if (useGroundTruth)
{
for (size_t i = 0; i < groupings.size(); i++)
{
int curBestScore = -1;
double curBestThreshold = -1;
for (size_t j = 1; j < groupings[i].size() - 1; j++)
{
double curThreshold = groupings[i][j].threshold;
int curScore = 0;
for (size_t k = 0; k < groupings.size(); k++)
{
vector<int> classes, answer_mask;
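// regroup the series at this candidate threshold and score the resulting panorama mask against the ground truth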
KruskalGrouper::group(groupings[k], curThreshold, minPanoSize, classes);
stringstream devNull;
generateOutputMaskFromClasses(classes, answer_mask, devNull);
int score = static_cast<int>(images_count - norm(Mat(answer_mask) - Mat(rightAnswers[from + k]), NORM_L1));
curScore += score;
}
if (curScore > curBestScore)
{
curBestScore = curScore;
curBestThreshold = curThreshold;
}
}
if (curBestScore > bestScore)
{
bestScore = curBestScore;
bestThreshold = curBestThreshold;
//......... part of the code is omitted here .........
Example 13: main
//......... part of the code is omitted here .........
for (size_t i = 0; i < edgeModels.size(); ++i)
{
detector.addTrainObject(objectNames[i], edgeModels[i]);
}
vector<Point3f> testPointCloud;
//dataImporter.importPointCloud(pointCloudFilename, testPointCloud);
#ifdef SHOW_CLOUD
Mat points3d;
depthTo3d(kinectDepth, kinectCamera.cameraMatrix, points3d);
testPointCloud = points3d.reshape(3, points3d.total());
#endif
vector<PoseRT> poses_cam;
vector<float> posesQualities;
vector<string> detectedObjectsNames;
TickMeter recognitionTime;
recognitionTime.start();
Detector::DebugInfo debugInfo;
try
{
detector.detect(kinectBgrImage, kinectDepth, registrationMask, testPointCloud, poses_cam, posesQualities, detectedObjectsNames, &debugInfo);
}
catch(const cv::Exception &)
{
}
#ifdef USE_INITIAL_GUESS
{
PoseRT initialPose;
//3
initialPose.rvec = (Mat_<double>(3, 1) << -0.8356714356174999, 0.08672943393358865, 0.1875608929524414);
initialPose.tvec = (Mat_<double>(3, 1) << -0.0308572565967134, 0.1872369696442459, 0.8105566363422957);
poses_cam.push_back(initialPose);
//TODO: move up
posesQualities.push_back(1.0f);
GlassSegmentator glassSegmentator(params.glassSegmentationParams);
Mat glassMask;
int numberOfComponents;
glassSegmentator.segment(kinectBgrImage, kinectDepth, registrationMask, numberOfComponents, glassMask);
showSegmentation(kinectBgrImage, glassMask);
transpod::PoseEstimator poseEstimator(kinectCamera);
poseEstimator.setModel(edgeModels[0]);
Vec4f tablePlane;
computeTableOrientationByFiducials(kinectCamera, kinectBgrImage, tablePlane);
poseEstimator.refinePosesBySupportPlane(kinectBgrImage, glassMask, tablePlane, poses_cam, posesQualities);
Mat finalVisualization = kinectBgrImage.clone();
poseEstimator.visualize(poses_cam[0], finalVisualization);
imshow("estimated poses", finalVisualization);
waitKey();
}
#endif
recognitionTime.stop();
cout << "Recognition time: " << recognitionTime.getTimeSec() << "s" << endl;
Mat glassMask = debugInfo.glassMask;
imshow("glassMask", glassMask);
showSegmentation(kinectBgrImage, glassMask, "segmentation");
/*
Mat detectionResults = kinectBgrImage.clone();
detector.visualize(poses_cam, detectedObjectsNames, detectionResults);
imshow("detection", detectionResults);
waitKey();
*/
cout << "number of detected poses: " << poses_cam.size() << endl;
cout << "number of qualities: " << posesQualities.size() << endl;
if (!posesQualities.empty())
{
cout << "quality: " << posesQualities[0] << endl;
std::vector<float>::iterator bestDetection = std::min_element(posesQualities.begin(), posesQualities.end());
int bestDetectionIndex = std::distance(posesQualities.begin(), bestDetection);
cout << "Recognized object: " << detectedObjectsNames[bestDetectionIndex] << endl;
Mat detectionResults = kinectBgrImage.clone();
// vector<PoseRT> bestPose(1, poses_cam[bestDetectionIndex]);
vector<string> bestName(1, detectedObjectsNames[bestDetectionIndex]);
// detector.visualize(bestPose, bestName, detectionResults);
detector.visualize(poses_cam, detectedObjectsNames, detectionResults);
imshow("detection", detectionResults);
imwrite("detection_" + bestName[0] + ".png", detectionResults);
imwrite("testImage_" + bestName[0] + ".png", kinectBgrImage);
}
waitKey();
#ifdef SHOW_CLOUD
detector.visualize(poses_cam, detectedObjectsNames, testPointCloud);
#endif
return 0;
}