This article collects and summarizes typical usage examples of the C++ TickMeter class. If you are wondering how exactly to use TickMeter in C++, what it is for, or what working examples look like, the hand-picked class code examples here may help.
The following presents 15 code examples of the TickMeter class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
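Before the examples, here is a minimal usage sketch of the TickMeter API that every snippet below relies on: start()/stop() add each measured interval to a running total, and reset() clears it. The include path and version are assumptions: cv::TickMeter is declared in opencv2/core/utility.hpp from OpenCV 3.2 on, while several older samples below defined an equivalent local class.
// Minimal TickMeter sketch (assumes OpenCV >= 3.2, where cv::TickMeter
// lives in opencv2/core/utility.hpp; older samples shipped a local copy).
#include <opencv2/core/utility.hpp>
#include <iostream>

int main()
{
    cv::TickMeter tm;
    tm.start();                  // begin a measurement
    // ... code to be timed ...
    tm.stop();                   // add the elapsed time to the running total
    std::cout << tm.getTimeMilli() << " ms = "
              << tm.getTimeSec() << " s" << std::endl;
    tm.reset();                  // clear the accumulated time before reuse
    return 0;
}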
Example 1: detectAndDrawObjects
void detectAndDrawObjects( Mat& image, LatentSvmDetector& detector, const vector<Scalar>& colors, float overlapThreshold, int numThreads )
{
vector<LatentSvmDetector::ObjectDetection> detections;
TickMeter tm;
tm.start();
detector.detect( image, detections, overlapThreshold, numThreads);
tm.stop();
cout << "Detection time = " << tm.getTimeSec() << " sec" << endl;
const vector<string> classNames = detector.getClassNames();
CV_Assert( colors.size() == classNames.size() );
for( size_t i = 0; i < detections.size(); i++ )
{
const LatentSvmDetector::ObjectDetection& od = detections[i];
rectangle( image, od.rect, colors[od.classID], 3 );
}
// put text over all the rectangles
for( size_t i = 0; i < detections.size(); i++ )
{
const LatentSvmDetector::ObjectDetection& od = detections[i];
putText( image, classNames[od.classID], Point(od.rect.x+4,od.rect.y+13), FONT_HERSHEY_SIMPLEX, 0.55, colors[od.classID], 2 );
}
}
Example 2: readDatabase
bool ObjectRecognition::readDatabase(const string& dir, vector<Mat>& databaseDescriptors, vector<string>& files)
{
TickMeter tm;
tm.start();
getdir(dir,files);
string outString = "Start Reading Directory.png";
cout << outString << endl;
string extention = ".png";
for (unsigned int i = 0; i < files.size(); i++)
{
if ( files[i].size() > 4 && files[i].compare( files[i].size() - 4, 4, extension ) == 0 )
{
Mat img = imread( dir + files[i] , CV_LOAD_IMAGE_GRAYSCALE );
//if( img.empty() ) cout << "Database descriptor " << files[i] << " can not be read or has no information." << endl;
//cout << files[i] << "\tRows" << img.rows << "\t Cols" << img.cols << "\t Type/Depth: " << img.depth() << endl;
img.assignTo(img, CV_32F); // type 5 == CV_32F: store each descriptor image as 32-bit float
databaseDescriptors.push_back( img );
}
}
tm.stop();
cout << "End reading directory in " << tm.getTimeMilli() << " ms, of size " << DB.size() << endl;
return true;
}
Example 3: main
int main(int argc, char** argv) {
using namespace std;
using namespace cv;
VideoCapture cap(0);
if (!cap.isOpened())
exit(1);
if (argc > 2) {
cap.set(CV_CAP_PROP_FRAME_WIDTH, atoi(argv[1]));
cap.set(CV_CAP_PROP_FRAME_HEIGHT, atoi(argv[2]));
}
CascadeClassifier cascade;
if (!cascade.load("haarcascade_frontalface_default.xml"))
exit(2);
const char* name = basename(argv[0]);
namedWindow(name);
for (int frame = 1;; frame++) {
static double mean = 0;
TickMeter tm;
Mat img, gray;
tm.start();
cap >> img;
cvtColor(img, gray, CV_BGR2GRAY);
equalizeHist(gray, gray);
vector<Rect> objects;
cascade.detectMultiScale(gray, objects, 1.2, 9,
CV_HAAR_DO_CANNY_PRUNING);
typedef vector<Rect>::const_iterator RCI;
for (RCI i = objects.begin(); i != objects.end(); ++i) {
Point center(cvRound(i->x+i->width/2),cvRound(i->y+i->height/2));
int radius = cvRound(i->width / 2);
circle(img, center, radius, Scalar(128,255,128), 2, 8, 0);
}
imshow(name, img);
tm.stop();
mean += tm.getTimeMilli();
if (frame % 25 == 0) {
printf("avg detect time: %.2f ms\n", mean / 25);
mean = 0;
}
switch (waitKey(10)) {
case 'q': case 27:
exit(0);
break;
}
}
}
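A side note on Example 3: the static "mean" accumulator duplicates bookkeeping that TickMeter already does, since start()/stop() pairs accumulate into one running total and getCounter() reports the number of completed cycles. The sketch below is an alternative under that assumption; it is not part of the original sample, and the per-frame work is elided.
#include <opencv2/core/utility.hpp>
#include <iostream>

int main()
{
    cv::TickMeter tm;                        // lives outside the frame loop
    for (int frame = 1; frame <= 100; frame++) {
        tm.start();
        // ... per-frame capture/detect/draw work would go here ...
        tm.stop();                           // total time and counter both grow
        if (frame % 25 == 0) {
            std::cout << "avg frame time: "
                      << tm.getTimeMilli() / tm.getCounter() << " ms" << std::endl;
            tm.reset();                      // start a fresh 25-frame window
        }
    }
    return 0;
}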
Example 4: main
int main(int argc, const char* argv[])
{
if (argc != 2)
return -1;
const std::string fname(argv[1]);
cv::namedWindow("CPU", cv::WINDOW_NORMAL);
cv::namedWindow("GPU", cv::WINDOW_OPENGL);
cv::cuda::setGlDevice();
cv::Mat frame;
cv::VideoCapture reader(fname);
cv::cuda::GpuMat d_frame;
cv::Ptr<cv::cudacodec::VideoReader> d_reader = cv::cudacodec::createVideoReader(fname);
TickMeter tm;
std::vector<double> cpu_times;
std::vector<double> gpu_times;
for (;;)
{
tm.reset();
tm.start();
if (!reader.read(frame))
break;
tm.stop();
cpu_times.push_back(tm.getTimeMilli());
tm.reset();
tm.start();
if (!d_reader->nextFrame(d_frame))
break;
tm.stop();
gpu_times.push_back(tm.getTimeMilli());
cv::imshow("CPU", frame);
cv::imshow("GPU", d_frame);
if (cv::waitKey(3) > 0)
break;
}
if (!cpu_times.empty() && !gpu_times.empty())
{
std::cout << std::endl << "Results:" << std::endl;
std::sort(cpu_times.begin(), cpu_times.end());
std::sort(gpu_times.begin(), gpu_times.end());
double cpu_avg = std::accumulate(cpu_times.begin(), cpu_times.end(), 0.0) / cpu_times.size();
double gpu_avg = std::accumulate(gpu_times.begin(), gpu_times.end(), 0.0) / gpu_times.size();
std::cout << "CPU : Avg : " << cpu_avg << " ms FPS : " << 1000.0 / cpu_avg << std::endl;
std::cout << "GPU : Avg : " << gpu_avg << " ms FPS : " << 1000.0 / gpu_avg << std::endl;
}
return 0;
}
Example 5: loadImageDB
bool ObjectRecognition::loadImageDB()
{
TickMeter tm;
tm.start();
vector<string> files;
getdir(DBdirName,files);
string extention = ".png";
vector<string>::iterator it = files.begin();
vector<Mat> descriptorDatabase;
for (unsigned int i = 0; i < files.size(); i++)
{
if ( files[i].size() > 4 && files[i].compare( files[i].size() - 4, 4, extension ) == 0 )
{
DBobj DBentry;
DBentry.name = files[i];
DBentry.img = imread( DBdirName + files[i] );
if( DBentry.img.empty() ) cout << "Image: " << files[i] << " can not be read or has no information." << endl;
DBentry.img.assignTo(DBentry.img, CV_8U);
//cout << files[i] << "\tRows" << DBentry.img.rows << "\t Cols" << DBentry.img.cols << "\t Type/Depth: " << DBentry.img.depth() << endl;
detectKeypointsSingle(DBentry.img, DBentry.keypoints );
//cout << files[i] << "\t# Keypoints:" << DBentry.keypoints.size() << endl;
if (DBentry.keypoints.size() > 9)
{
computeDescriptorsSingle(DBentry.img, DBentry.keypoints, DBentry.description);
//cout << files[i] << "\t# of Descriptors: " << DBentry.description.rows << "\t# of Dimensions for descriptor: " << DBentry.description.cols
// << "\tType/depth: " << DBentry.description.type() << " | " << DBentry.description.depth() << endl;
descriptorDatabase.push_back(DBentry.description);
DB.push_back( DBentry );
}
}
}
// Add Database to matcher program.
matcher->add(descriptorDatabase);
matcher->train();
tm.stop();
cout << "End reading directory in " << tm.getTimeMilli() << " ms, of size " << DB.size() << endl;
return true;
}
Example 6: matchDescriptors
static void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
vector<DMatch>& matches, Ptr<DescriptorMatcher>& descriptorMatcher )
{
cout << "< Set train descriptors collection in the matcher and match query descriptors to them..." << endl;
TickMeter tm;
tm.start();
descriptorMatcher->add( trainDescriptors );
descriptorMatcher->train();
tm.stop();
double buildTime = tm.getTimeMilli();
tm.reset(); // TickMeter accumulates across start()/stop() pairs, so clear the build time first
tm.start();
descriptorMatcher->match( queryDescriptors, matches );
tm.stop();
double matchTime = tm.getTimeMilli();
CV_Assert( queryDescriptors.rows == (int)matches.size() || matches.empty() );
cout << "Number of matches: " << matches.size() << endl;
cout << "Build time: " << buildTime << " ms; Match time: " << matchTime << " ms" << endl;
cout << ">" << endl;
}
Example 7: main
//... (portion of code omitted) ...
}
kpfn = kpfn+ ".yml";
// Create filestorage item to read from and add to map.
FileStorage store(kpfn, cv::FileStorage::READ);
FileNode n1 = store["SurfKeypoints"];
read(n1,SurfKeypoints);
FileNode n2 = store["SiftKeypoints"];
read(n2,SiftKeypoints);
FileNode n3 = store["OrbKeypoints"];
read(n3,OrbKeypoints);
FileNode n4 = store["FastKeypoints"];
read(n4,FastKeypoints);
FileNode n5 = store["SurfDescriptors"];
read(n5,SurfDescriptors);
FileNode n6 = store["SiftDescriptors"];
read(n6,SiftDescriptors);
FileNode n7 = store["OrbDescriptors"];
read(n7,OrbDescriptors);
FileNode n8 = store["FastDescriptors"];
read(n8,FastDescriptors);
store.release();
surfmap[ID] = SurfDescriptors;
siftmap[ID] = SiftDescriptors;
orbmap[ID] = OrbDescriptors;
fastmap[ID] = FastDescriptors;
}
}
TickMeter tm;
tm.reset();
cout << "<\n Analyzing Images ..." << endl;
// We have a bunch of images, now we compute their grayscale and black and white.
map<vector<float>, Mat> gsmap;
map<vector<float>, Mat> bwmap;
for (map<vector<float>, Mat>::iterator i = imagemap.begin(); i != imagemap.end(); ++i)
{
vector<float> ID = i->first;
Mat Image = i->second;
GaussianBlur( Image, Image, Size(5,5), 0, 0, BORDER_DEFAULT );
gsmap[ID] = averageImage::getPixSumFromImage(Image, divs);
bwmap[ID] = averageImage::aboveBelow(gsmap[ID]);
}
Mat image = imread(image_name);
Mat gsimage = averageImage::getPixSumFromImage(image, divs);
Mat bwimage = averageImage::aboveBelow(gsimage);
// cout << gsimage <<endl;
imwrite("GS.png", gsimage);
namedWindow("GSIMAGE (Line 319)");
imshow("GSIMAGE (Line 319)", gsimage);
waitKey(0);
vector<KeyPoint> imgSurfKeypoints;
vector<KeyPoint> imgSiftKeypoints;
vector<KeyPoint> imgOrbKeypoints;
vector<KeyPoint> imgFastKeypoints;
Mat imgSurfDescriptors;
Mat imgSiftDescriptors;
//... (portion of code omitted) ...
Example 8: bingQdpmRocTest
void bingQdpmRocTest(vector<string> &dirs,
int windowLimit = -1, double timeLimitMs = -1, float ratioThreshold = -1)
{
size_t imageCount = 0;
size_t personCount = 0;
size_t matchCount = 0;
vector<ScoreTp> pScores;
TickMeter tm;
vector<std::string>::const_iterator it = dirs.begin();
char buf[512];
for (; it != dirs.end(); it++) {
string dir = *it;
DataSetVOC voc(dir, true, true);
voc.loadAnnotations();
const size_t testNum = voc.testSet.size();
const char *imgPath = _S(voc.imgPathW);
// Objectness
double base = 2;
double intUionThr = 0.5;
int W = 8;
int NSS = 2;
#ifdef WINDOW_GUESS
Objectness objNess(voc, base, intUionThr, W, NSS);
objNess.loadTrainedModel(TRAIN_MODEL);
#endif
// LSVM DPM
string dpmPersonModel = "../ExtraData/latentsvmXml/person.xml";
vector<string> models;
models.push_back(dpmPersonModel);
QUniLsvmDetector detector(models);
float overlapThreshold = 0.2f;
if (ratioThreshold > 0)
detector.setRatioThreshold(ratioThreshold);
printf("%d: \n", testNum);
for (int i = 0; i < testNum; i++) {
const vector<Vec4i> &boxesGT = voc.gtTestBoxes[i];
const size_t gtNumCrnt = boxesGT.size();
if (gtNumCrnt <= 0)
continue;
imageCount++;
personCount += gtNumCrnt;
Mat image = imread(format(imgPath, _S(voc.testSet[i])));
if (image.ptr() == NULL) {
fprintf(stderr, "No JPG Image !\n");
exit(1);
}
int numPerSz = 130;
ValStructVec<float, Vec4i> boxes;
double preObj = tm.getTimeMilli();
double objTime = 0.;
#ifdef WINDOW_GUESS // window guess
tm.start();
objNess.getObjBndBoxes(image, boxes, numPerSz);
tm.stop();
objTime = tm.getTimeMilli() - preObj;
#endif
double localTimeLimitMs = timeLimitMs;
if (timeLimitMs > 0) {
localTimeLimitMs -= objTime;
if (localTimeLimitMs < 0.)
localTimeLimitMs = 0.;
}
vector<QRect> searchBoxes;
int numSearch = (int)boxes.size();
if (windowLimit > 0 && numSearch > windowLimit)
numSearch = windowLimit;
for (int j = 0; j < numSearch; j++) {
const Vec4i &bb = boxes[j];
QRect rt(bb[0], bb[1], bb[2], bb[3]);
searchBoxes.push_back(rt);
}
tm.start();
detector.setup(image, overlapThreshold, localTimeLimitMs);
tm.stop();
vector<FeatureMapCoord> ftrMapCoords;
#ifdef WINDOW_GUESS
detector.cvtBox2FtrMapCoord(&searchBoxes, &ftrMapCoords);
#else
detector.genFullFtrMapCoord(&ftrMapCoords);
//... (portion of code omitted) ...
Example 9: process
Mat Tracker::process(const Mat frame, Stats& stats)
{
TickMeter tm;
vector<KeyPoint> kp;
Mat desc;
tm.start();
detector->detectAndCompute(frame, noArray(), kp, desc);
stats.keypoints = (int)kp.size();
vector< vector<DMatch> > matches;
vector<KeyPoint> matched1, matched2;
matcher->knnMatch(first_desc, desc, matches, 2);
for(unsigned i = 0; i < matches.size(); i++) {
if(matches[i][0].distance < nn_match_ratio * matches[i][1].distance) {
matched1.push_back(first_kp[matches[i][0].queryIdx]);
matched2.push_back( kp[matches[i][0].trainIdx]);
}
}
stats.matches = (int)matched1.size();
Mat inlier_mask, homography;
vector<KeyPoint> inliers1, inliers2;
vector<DMatch> inlier_matches;
if(matched1.size() >= 4) {
homography = findHomography(Points(matched1), Points(matched2),
RANSAC, ransac_thresh, inlier_mask);
}
tm.stop();
stats.fps = 1. / tm.getTimeSec();
if(matched1.size() < 4 || homography.empty()) {
Mat res;
hconcat(first_frame, frame, res);
stats.inliers = 0;
stats.ratio = 0;
return res;
}
for(unsigned i = 0; i < matched1.size(); i++) {
if(inlier_mask.at<uchar>(i)) {
int new_i = static_cast<int>(inliers1.size());
inliers1.push_back(matched1[i]);
inliers2.push_back(matched2[i]);
inlier_matches.push_back(DMatch(new_i, new_i, 0));
}
}
stats.inliers = (int)inliers1.size();
stats.ratio = stats.inliers * 1.0 / stats.matches;
vector<Point2f> new_bb;
perspectiveTransform(object_bb, new_bb, homography);
Mat frame_with_bb = frame.clone();
if(stats.inliers >= bb_min_inliers) {
drawBoundingBox(frame_with_bb, new_bb);
}
Mat res;
drawMatches(first_frame, inliers1, frame_with_bb, inliers2,
inlier_matches, res,
Scalar(255, 0, 0), Scalar(255, 0, 0));
return res;
}
Example 10: main
//... (portion of code omitted) ...
signal(SIGINT,sigint_handler);
protonect_shutdown = false;
libfreenect2::SyncMultiFrameListener listener(libfreenect2::Frame::Color);
libfreenect2::FrameMap frames;
dev->setColorFrameListener(&listener);
dev->start();
std::cout << "device serial: " << dev->getSerialNumber() << std::endl;
std::cout << "device firmware: " << dev->getFirmwareVersion() << std::endl;
libfreenect2::Registration* registration = new libfreenect2::Registration(dev->getIrCameraParams(), dev->getColorCameraParams());
/////////////////// END KINECT /////////////////
while(!protonect_shutdown)
{
listener.waitForNewFrame(frames);
libfreenect2::Frame *rgb = frames[libfreenect2::Frame::Color];
cv::Mat k_rgb = cv::Mat(rgb->height, rgb->width, CV_8UC4, rgb->data);
image = Mat(k_rgb);
frame_gpu.upload(k_rgb);
cuda::flip(frame_gpu,frame_gpu,1);
cv::flip(image,image,1);
cuda::cvtColor(frame_gpu,k_rgb_gpu,CV_BGRA2BGR);
convertAndResizeGPU(k_rgb_gpu, gray_gpu, resized_gpu, scaleFactor);
convertAndResizeCPU(image,image,scaleFactor);
TickMeter tm;
tm.start();
//cascade_gpu->setMaxNumObjects(2);
//cascade_gpu->setMaxObjectSize(cv::Size(224,224));
//cascade_gpu->setMinObjectSize(cv::Size(0,0));
cascade_gpu->setFindLargestObject(findLargestObject);
cascade_gpu->setScaleFactor(1.2);
cascade_gpu->setMinNeighbors((filterRects || findLargestObject) ? 4 : 0);
cascade_gpu->detectMultiScale(resized_gpu, facesBuf_gpu);
cascade_gpu->convert(facesBuf_gpu, faces);
for (size_t i = 0; i < faces.size(); ++i)
{
//cout<< "object [" << i << "]: " << faces[i].width << " x " << faces[i].height <<endl;
rectangle(image, faces[i], Scalar(255));
cropRect = Rect(image.cols / 2, image.rows / 2, 224, 224);
Mat cropImg = image(cropRect).clone();
if(predictObject == true)
{
std::vector<Prediction> predictions = CaffeClassifier.Classify(cropImg,1);
/* Print the top N predictions. */
for (size_t j = 0; j < predictions.size(); ++j) // j avoids shadowing the face-loop index i
{
Prediction p = predictions[j];
std::cout << std::fixed << std::setprecision(4) << p.second << " - \"" << p.first << "\"" << std::endl;
}
predictObject = false;
}
}
//... (portion of code omitted) ...
Example 11: main
/*
* This sample helps to evaluate odometry on TUM datasets and benchmark http://vision.in.tum.de/data/datasets/rgbd-dataset.
* At this link you can find instructions for evaluation. The sample runs some opencv odometry and saves a camera trajectory
* to file of format that the benchmark requires. Saved file can be used for online evaluation.
*/
int main(int argc, char** argv)
{
if(argc != 4)
{
cout << "Format: file_with_rgb_depth_pairs trajectory_file odometry_name [Rgbd or ICP or RgbdICP]" << endl;
return -1;
}
vector<string> timestamps;
vector<Mat> Rts;
const string filename = argv[1];
ifstream file( filename.c_str() );
if( !file.is_open() )
return -1;
char dlmrt = '/';
size_t pos = filename.rfind(dlmrt);
string dirname = pos == string::npos ? "" : filename.substr(0, pos) + dlmrt;
const int timestampLength = 17;
const int rgbPathLength = 17+8;
const int depthPathLength = 17+10;
float fx = 525.0f, // default
fy = 525.0f,
cx = 319.5f,
cy = 239.5f;
if(filename.find("freiburg1") != string::npos)
setCameraMatrixFreiburg1(fx, fy, cx, cy);
if(filename.find("freiburg2") != string::npos)
setCameraMatrixFreiburg2(fx, fy, cx, cy);
Mat cameraMatrix = Mat::eye(3,3,CV_32FC1);
{
cameraMatrix.at<float>(0,0) = fx;
cameraMatrix.at<float>(1,1) = fy;
cameraMatrix.at<float>(0,2) = cx;
cameraMatrix.at<float>(1,2) = cy;
}
Ptr<OdometryFrame> frame_prev = new OdometryFrame(),
frame_curr = new OdometryFrame();
Ptr<Odometry> odometry = Algorithm::create<Odometry>("RGBD." + string(argv[3]) + "Odometry");
if(odometry.empty())
{
cout << "Can not create Odometry algorithm. Check the passed odometry name." << endl;
return -1;
}
odometry->set("cameraMatrix", cameraMatrix);
TickMeter gtm;
int count = 0;
for(int i = 0; !file.eof(); i++)
{
string str;
std::getline(file, str);
if(str.empty()) break;
if(str.at(0) == '#') continue; /* comment */
Mat image, depth;
// Read one pair (rgb and depth)
// example: 1305031453.359684 rgb/1305031453.359684.png 1305031453.374112 depth/1305031453.374112.png
#if BILATERAL_FILTER
TickMeter tm_bilateral_filter;
#endif
{
string rgbFilename = str.substr(timestampLength + 1, rgbPathLength );
string timestamp = str.substr(0, timestampLength);
string depthFilename = str.substr(2*timestampLength + rgbPathLength + 3, depthPathLength );
image = imread(dirname + rgbFilename);
depth = imread(dirname + depthFilename, -1);
CV_Assert(!image.empty());
CV_Assert(!depth.empty());
CV_Assert(depth.type() == CV_16UC1);
cout << i << " " << rgbFilename << " " << depthFilename << endl;
// scale depth
Mat depth_flt;
depth.convertTo(depth_flt, CV_32FC1, 1.f/5000.f);
#if not BILATERAL_FILTER
depth_flt.setTo(std::numeric_limits<float>::quiet_NaN(), depth == 0);
depth = depth_flt;
#else
tm_bilateral_filter.start();
depth = Mat(depth_flt.size(), CV_32FC1, Scalar(0));
const double depth_sigma = 0.03;
const double space_sigma = 4.5; // in pixels
Mat invalidDepthMask = depth_flt == 0.f;
depth_flt.setTo(-5*depth_sigma, invalidDepthMask);
bilateralFilter(depth_flt, depth, -1, depth_sigma, space_sigma);
depth.setTo(std::numeric_limits<float>::quiet_NaN(), invalidDepthMask);
tm_bilateral_filter.stop();
//... (portion of code omitted) ...
Example 12: main
int main(int argc, const char* argv[])
{
CommandLineParser cmd(argc, argv,
"{ image i | ../data/pic1.png | input image }"
"{ template t | templ.png | template image }"
"{ full | | estimate scale and rotation }"
"{ gpu | | use gpu version }"
"{ minDist | 100 | minimum distance between the centers of the detected objects }"
"{ levels | 360 | R-Table levels }"
"{ votesThreshold | 30 | the accumulator threshold for the template centers at the detection stage. The smaller it is, the more false positions may be detected }"
"{ angleThresh | 10000 | angle votes treshold }"
"{ scaleThresh | 1000 | scale votes treshold }"
"{ posThresh | 100 | position votes threshold }"
"{ dp | 2 | inverse ratio of the accumulator resolution to the image resolution }"
"{ minScale | 0.5 | minimal scale to detect }"
"{ maxScale | 2 | maximal scale to detect }"
"{ scaleStep | 0.05 | scale step }"
"{ minAngle | 0 | minimal rotation angle to detect in degrees }"
"{ maxAngle | 360 | maximal rotation angle to detect in degrees }"
"{ angleStep | 1 | angle step in degrees }"
"{ maxBufSize | 1000 | maximal size of inner buffers }"
"{ help h ? | | print help message }"
);
cmd.about("This program demonstrates arbitrary object finding with the Generalized Hough transform.");
if (cmd.has("help"))
{
cmd.printMessage();
return 0;
}
const string templName = cmd.get<string>("template");
const string imageName = cmd.get<string>("image");
const bool full = cmd.has("full");
const bool useGpu = cmd.has("gpu");
const double minDist = cmd.get<double>("minDist");
const int levels = cmd.get<int>("levels");
const int votesThreshold = cmd.get<int>("votesThreshold");
const int angleThresh = cmd.get<int>("angleThresh");
const int scaleThresh = cmd.get<int>("scaleThresh");
const int posThresh = cmd.get<int>("posThresh");
const double dp = cmd.get<double>("dp");
const double minScale = cmd.get<double>("minScale");
const double maxScale = cmd.get<double>("maxScale");
const double scaleStep = cmd.get<double>("scaleStep");
const double minAngle = cmd.get<double>("minAngle");
const double maxAngle = cmd.get<double>("maxAngle");
const double angleStep = cmd.get<double>("angleStep");
const int maxBufSize = cmd.get<int>("maxBufSize");
if (!cmd.check())
{
cmd.printErrors();
return -1;
}
Mat templ = loadImage(templName);
Mat image = loadImage(imageName);
Ptr<GeneralizedHough> alg;
if (!full)
{
Ptr<GeneralizedHoughBallard> ballard = useGpu ? cuda::createGeneralizedHoughBallard() : createGeneralizedHoughBallard();
ballard->setMinDist(minDist);
ballard->setLevels(levels);
ballard->setDp(dp);
ballard->setMaxBufferSize(maxBufSize);
ballard->setVotesThreshold(votesThreshold);
alg = ballard;
}
else
{
Ptr<GeneralizedHoughGuil> guil = useGpu ? cuda::createGeneralizedHoughGuil() : createGeneralizedHoughGuil();
guil->setMinDist(minDist);
guil->setLevels(levels);
guil->setDp(dp);
guil->setMaxBufferSize(maxBufSize);
guil->setMinAngle(minAngle);
guil->setMaxAngle(maxAngle);
guil->setAngleStep(angleStep);
guil->setAngleThresh(angleThresh);
guil->setMinScale(minScale);
guil->setMaxScale(maxScale);
guil->setScaleStep(scaleStep);
guil->setScaleThresh(scaleThresh);
guil->setPosThresh(posThresh);
alg = guil;
}
vector<Vec4f> position;
TickMeter tm;
//... (portion of code omitted) ...
Example 13: SuperRe
void SuperRe()
{
Mat inputImg = imread(INPUTFILNAME, 1);
inputImg.convertTo(inputImg, CV_32FC3);
/*************************************color********/
//vector<Scalar> color;
//color.push_back(Scalar(255, 0, 0));//0
//color.push_back(Scalar(0, 255, 0));//1
//color.push_back(Scalar(0, 0, 255));//2
//color.push_back(Scalar(192, 0, 0));//3 dark red
//color.push_back(Scalar(255, 192, 0));//4 orange
//color.push_back(Scalar(255, 255, 0));//5 yellow
//color.push_back(Scalar(146, 208, 80));//6 green
//color.push_back(Scalar(0, 176, 80));//7 dark green
//color.push_back(Scalar(0, 176, 240));//8 blue
//color.push_back(Scalar(0, 112, 192));//9 dark blue
//color.push_back(Scalar(0, 32, 96));//dark blue
//color.push_back(Scalar(112, 48, 160));//dark pink
/****************************color*****************/
//Key = cvCreateMat(446508, 174, CV_32F);
TickMeter tm;
tm.start();
vector<Mat> CrCb;
draw = new Point2d*[inputImg.rows];
drawindex = new int*[inputImg.rows];
for (int n = 0; n < inputImg.rows; n++)
{
draw[n] = new Point2d[inputImg.cols];
drawindex[n] = new int[inputImg.cols];
for (int u = 0; u < inputImg.cols; u++)
{
drawindex[n][u] = 0;
}
}
/*float** data;
data = new float*[Key.rows];
for (int i = 0; i < Key.rows; i++){
data[i] = new float[Key.cols];
for (int j = 0; j < Key.cols; j++){
data[i][j] = -500.0;
}
}
for (int row = 0; row < Key.rows; row++)
{
for (int col = 0; col < Key.cols; col++)
{
data[row][col] = Key.at<float>(row, col);
}
}
WriteFile(Key.rows, Key.cols, data, KEY);
for (int i = 0; i < Key.rows; i++){
delete[]data[i];
}
delete[]data; */
/*******************************************/
//ifstream in(KEY);
//char line[10240];
//in.getline(line, 10240);
//cout << line;
//int lineno = 0;
//while (in.getline(line, 10240)){
// stringstream ss2;
// ss2 << line;
// for (int j = 0; j < 174; j++){
// ss2 >> Key.at<float>(lineno, j);// data[lineno][j];
// }
// lineno++;
//}
/*for (int row = 0; row < Key.rows; row++)
{
for (int col = 0; col < Key.cols; col++)
{
Key.at<float>(row, col) = data[row][col];
}
}*/
for (int row = 0; row < inputImg.rows; row++)
{
for (int col = 0; col < inputImg.cols; col++)
{
inputImg.at<Vec3f>(row, col)[0] = inputImg.at<Vec3f>(row, col)[0] / 255.0;
inputImg.at<Vec3f>(row, col)[1] = inputImg.at<Vec3f>(row, col)[1] / 255.0;
inputImg.at<Vec3f>(row, col)[2] = inputImg.at<Vec3f>(row, col)[2] / 255.0;
}
}
Mat inputImage, Input, rycImg, temp, Cr, Cb, srcOfMerge, finalMerge, colorSrc;
colorSrc = inputImg.clone();
GaussianBlur(inputImg, inputImage, Size(7, 7), 1, 0.0, BORDER_REPLICATE);
resize(inputImage, Input, Size(inputImage.cols / 4, inputImage.rows / 4), 0, 0, CV_INTER_CUBIC);
imwrite(TEST + "inputImage.png", Input*255.0);
cvtColor(inputImg, rycImg, CV_BGR2YCrCb);
split(rycImg, CrCb);
Mat SrcImg = CrCb[0];
GaussianBlur(CrCb[1], CrCb[1], Size(7, 7), 1, 0.0, BORDER_REPLICATE);
resize(CrCb[1], temp, Size(inputImage.cols / 4, inputImage.rows / 4), 0, 0, CV_INTER_CUBIC);
resize(temp, CrCb[1], Size(inputImage.cols, inputImage.rows), 0, 0, CV_INTER_CUBIC);
//... (portion of code omitted) ...
Example 14: fullPanoramaMask
void TaskManager::run(string groundTruthFile)
{
//run on the train data or on the test data
bool useGroundTruth = !groundTruthFile.empty();
cout << "Processing from " << from << " to " << to << endl << endl;
answers.clear();
Answers::type rightAnswers;
if (useGroundTruth)
{
Answers::loadAnswers(groundTruthFile, rightAnswers);
const int inlierLabel = 0;
vector<int> fullPanoramaMask(images_count, inlierLabel);
for (int i = from; i <= to; ++i)
if (rightAnswers.find(i) == rightAnswers.end())
rightAnswers.insert(make_pair(i, fullPanoramaMask));
}
int total = to - from + 1;
vector < vector<KruskalGrouper::Grouping> > groupings(total);
float minIncorrectDistance = std::numeric_limits<float>::max();
int minIncorrectDistanceSeriaIdx = -1;
#pragma omp parallel for schedule(dynamic, 5)
for (int i = from; i <= to; ++i)
{
cout << "Seria #" << i << "\t" << endl;
TickMeter time;
time.start();
OnePanoSolver solver(folder, i, cache_folder);
Mat diff;
#if 0 // cross-check only
bool found = solver.launch(groupings[i - from], diff, int());
#else
solver.launch(groupings[i - from], diff);
#endif
time.stop();
cout << "Time: " << time.getTimeSec() << "s" << endl;
if (useGroundTruth)
{
vector<int> right = rightAnswers[i];
for (int j = 0; j < diff.rows; j++)
{
for (int k = j + 1; k < diff.cols; k++)
{
if (right[j] != right[k])
{
CV_Assert(diff.type() == CV_32FC1);
if (diff.at<float> (j, k) <= minIncorrectDistance)
{
minIncorrectDistance = diff.at<float> (j, k);
minIncorrectDistanceSeriaIdx = i;
}
}
}
}
}
}
int bestScore = -1;
double bestThreshold = -1;
const int minPanoSize = 3;
if (useGroundTruth)
{
for (size_t i = 0; i < groupings.size(); i++)
{
int curBestScore = -1;
double curBestThreshold = -1;
for (size_t j = 1; j < groupings[i].size() - 1; j++)
{
double curThreshold = groupings[i][j].threshold;
int curScore = 0;
for (size_t k = 0; k < groupings.size(); k++)
{
vector<int> classes, answer_mask;
KruskalGrouper::group(groupings[k], curThreshold, minPanoSize, classes);
stringstream devNull;
generateOutputMaskFromClasses(classes, answer_mask, devNull);
int score = static_cast<int>(images_count - norm(Mat(answer_mask) - Mat(rightAnswers[from + k]), NORM_L1));
curScore += score;
}
if (curScore > curBestScore)
{
curBestScore = curScore;
curBestThreshold = curThreshold;
}
}
if (curBestScore > bestScore)
{
bestScore = curBestScore;
bestThreshold = curBestThreshold;
//... (portion of code omitted) ...
Example 15: getFeaturePyramid
int getFeaturePyramid(IplImage * image, CvLSVMFeaturePyramid **maps,
const int bx, const int by)
{
IplImage *imgResize;
float step;
unsigned int numStep;
unsigned int maxNumCells;
unsigned int W, H;
if (image->depth == IPL_DEPTH_32F)
{
imgResize = image;
}
else
{
imgResize = cvCreateImage(cvSize(image->width, image->height),
IPL_DEPTH_32F, 3);
cvConvert(image, imgResize);
}
W = imgResize->width;
H = imgResize->height;
step = powf(2.0f, 1.0f / ((float) Lambda));
maxNumCells = W / Side_Length;
if (maxNumCells > H / Side_Length)
{
maxNumCells = H / Side_Length;
}
numStep = (int) (logf((float) maxNumCells / (5.0f)) / logf(step)) + 1;
allocFeaturePyramidObject(maps, numStep + Lambda);
#ifdef PROFILE
TickMeter tm;
tm.start();
cout << "(featurepyramid.cpp)getPathOfFeaturePyramid START " << endl;
#endif
uploadImageToGPU1D(imgResize);
getPathOfFeaturePyramidGPUStream(imgResize, step, Lambda, 0,
Side_Length / 2, bx, by, maps);
getPathOfFeaturePyramidGPUStream(imgResize, step, numStep, Lambda,
Side_Length, bx, by, maps);
cleanImageFromGPU1D();
#ifdef PROFILE
tm.stop();
cout << "(featurepyramid.cpp)getPathOfFeaturePyramid END time = "
<< tm.getTimeSec() << " sec" << endl;
#endif
if (image->depth != IPL_DEPTH_32F)
{
cvReleaseImage(&imgResize);
}
return LATENT_SVM_OK;
}
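A closing note on Example 15: gating the meter behind a PROFILE macro means release builds pay no timing cost at all. The following is a distilled sketch of that pattern, not the sample itself; the macro name mirrors the sample, while doWork() is a hypothetical stand-in for the profiled call.
#include <opencv2/core/utility.hpp>
#include <iostream>

static void doWork() { /* hypothetical function being profiled */ }

// Compile with -DPROFILE to enable timing; without it the preprocessor
// removes the TickMeter code entirely, as in Example 15.
void profiledWork()
{
#ifdef PROFILE
    cv::TickMeter tm;
    tm.start();
#endif
    doWork();
#ifdef PROFILE
    tm.stop();
    std::cout << "doWork time = " << tm.getTimeSec() << " sec" << std::endl;
#endif
}

int main() { profiledWork(); return 0; }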