This article collects typical usage examples of the TickMeter::getTimeMilli method in C++. If you are wondering what TickMeter::getTimeMilli does, how to call it, or want concrete examples, the curated samples below may help. You can also explore further usage examples of its containing class, TickMeter.
Below are 15 code examples of TickMeter::getTimeMilli, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
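Before diving into the examples, here is a minimal sketch of the canonical timing pattern (an illustrative snippet, not drawn from the examples below; it assumes OpenCV 3.x or later, where cv::TickMeter ships in the core module — earlier 2.x samples provided an equivalent standalone TickMeter helper). One subtlety worth knowing: the meter accumulates time across start()/stop() pairs until reset() is called, which is why several examples below call tm.reset() at the top of each loop iteration.
#include <iostream>
#include <opencv2/core.hpp> // cv::TickMeter lives in the core module (OpenCV 3.x+)

int main()
{
    cv::TickMeter tm;
    tm.start();
    // ... the workload being timed goes here ...
    tm.stop();
    std::cout << "Elapsed: " << tm.getTimeMilli() << " ms" << std::endl;
    // getTimeMilli() reports the time accumulated over all start()/stop()
    // intervals since the last reset(), not just the most recent interval.
    tm.reset(); // start the next measurement from zero
    return 0;
}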
Example 1: main
int main(int argc, const char* argv[])
{
if (argc != 2)
return -1;
const std::string fname(argv[1]);
cv::namedWindow("CPU", cv::WINDOW_NORMAL);
cv::namedWindow("GPU", cv::WINDOW_OPENGL);
cv::cuda::setGlDevice();
cv::Mat frame;
cv::VideoCapture reader(fname);
cv::cuda::GpuMat d_frame;
cv::Ptr<cv::cudacodec::VideoReader> d_reader = cv::cudacodec::createVideoReader(fname);
TickMeter tm;
std::vector<double> cpu_times;
std::vector<double> gpu_times;
for (;;)
{
tm.reset();
tm.start();
if (!reader.read(frame))
break;
tm.stop();
cpu_times.push_back(tm.getTimeMilli());
tm.reset();
tm.start();
if (!d_reader->nextFrame(d_frame))
break;
tm.stop();
gpu_times.push_back(tm.getTimeMilli());
cv::imshow("CPU", frame);
cv::imshow("GPU", d_frame);
if (cv::waitKey(3) > 0)
break;
}
if (!cpu_times.empty() && !gpu_times.empty())
{
std::cout << std::endl << "Results:" << std::endl;
std::sort(cpu_times.begin(), cpu_times.end());
std::sort(gpu_times.begin(), gpu_times.end());
double cpu_avg = std::accumulate(cpu_times.begin(), cpu_times.end(), 0.0) / cpu_times.size();
double gpu_avg = std::accumulate(gpu_times.begin(), gpu_times.end(), 0.0) / gpu_times.size();
std::cout << "CPU : Avg : " << cpu_avg << " ms FPS : " << 1000.0 / cpu_avg << std::endl;
std::cout << "GPU : Avg : " << gpu_avg << " ms FPS : " << 1000.0 / gpu_avg << std::endl;
}
return 0;
}
Example 2: readDatabase
bool ObjectRecognition::readDatabase(const string& dir, vector<Mat>& databaseDescriptors, vector<string>& files)
{
TickMeter tm;
tm.start();
getdir(dir,files);
string outString = "Start reading directory";
cout << outString << endl;
string extension = ".png";
for (unsigned int i = 0;i < files.size();i++)
{
if ( files[i].size() > 4 && files[i].compare( files[i].size() - 4, 4 , extension) == 0)
{
Mat img = imread( dir + files[i] , CV_LOAD_IMAGE_GRAYSCALE );
//if( img.empty() ) cout << "Database descriptor " << files[i] << " can not be read or has no information." << endl;
//cout << files[i] << "\tRows" << img.rows << "\t Cols" << img.cols << "\t Type/Depth: " << img.depth() << endl;
img.assignTo(img, CV_32F); // was the magic number 5, which is CV_32F
databaseDescriptors.push_back( img );
}
}
tm.stop();
cout << "End reading directory in " << tm.getTimeMilli() << " ms, of size " << DB.size() << endl;
return true;
}
Example 3: main
int main(int argc, char** argv) {
using namespace std;
using namespace cv;
VideoCapture cap(0);
if (!cap.isOpened())
exit(1);
if (argc > 2) {
cap.set(CV_CAP_PROP_FRAME_WIDTH, atoi(argv[1]));
cap.set(CV_CAP_PROP_FRAME_HEIGHT, atoi(argv[2]));
}
CascadeClassifier cascade;
if (!cascade.load("haarcascade_frontalface_default.xml"))
exit(2);
const char* name = basename(argv[0]);
namedWindow(name);
for (int frame = 1;; frame++) {
static double mean = 0;
TickMeter tm;
Mat img, gray;
tm.start();
cap >> img;
cvtColor(img, gray, CV_BGR2GRAY);
equalizeHist(gray, gray);
vector<Rect> objects;
cascade.detectMultiScale(gray, objects, 1.2, 9,
CV_HAAR_DO_CANNY_PRUNING);
typedef vector<Rect>::const_iterator RCI;
for (RCI i = objects.begin(); i != objects.end(); ++i) {
Point center(cvRound(i->x+i->width/2),cvRound(i->y+i->height/2));
int radius = cvRound(i->width / 2);
circle(img, center, radius, Scalar(128,255,128), 2, 8, 0);
}
imshow(name, img);
tm.stop();
mean += tm.getTimeMilli();
if (frame % 25 == 0) {
printf("avg detect time: %.2f ms\n", mean / 25);
mean = 0;
}
switch (waitKey(10)) {
case 'q': case 27:
exit(0);
break;
}
}
}
Example 4: matchDescriptors
static void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
vector<DMatch>& matches, Ptr<DescriptorMatcher>& descriptorMatcher )
{
cout << "< Set train descriptors collection in the matcher and match query descriptors to them..." << endl;
TickMeter tm;
tm.start();
descriptorMatcher->add( trainDescriptors );
descriptorMatcher->train();
tm.stop();
double buildTime = tm.getTimeMilli();
tm.start();
descriptorMatcher->match( queryDescriptors, matches );
tm.stop();
double matchTime = tm.getTimeMilli();
CV_Assert( queryDescriptors.rows == (int)matches.size() || matches.empty() );
cout << "Number of matches: " << matches.size() << endl;
cout << "Build time: " << buildTime << " ms; Match time: " << matchTime << " ms" << endl;
cout << ">" << endl;
}
Example 5: loadImageDB
bool ObjectRecognition::loadImageDB()
{
TickMeter tm;
tm.start();
vector<string> files;
getdir(DBdirName,files);
string extension = ".png";
vector<Mat> descriptorDatabase;
for (unsigned int i = 0;i < files.size();i++)
{
if ( files[i].size() > 4 && files[i].compare( files[i].size() - 4, 4 , extension) == 0)
{
DBobj DBentry;
DBentry.name = files[i];
DBentry.img = imread( DBdirName + files[i] );
if( DBentry.img.empty() ) cout << "Image: " << files[i] << " can not be read or has no information." << endl;
DBentry.img.assignTo(DBentry.img, CV_8U);
//cout << files[i] << "\tRows" << DBentry.img.rows << "\t Cols" << DBentry.img.cols << "\t Type/Depth: " << DBentry.img.depth() << endl;
detectKeypointsSingle(DBentry.img, DBentry.keypoints );
//cout << files[i] << "\t# Keypoints:" << DBentry.keypoints.size() << endl;
if (DBentry.keypoints.size() > 9)
{
computeDescriptorsSingle(DBentry.img, DBentry.keypoints, DBentry.description);
//cout << files[i] << "\t# of Descriptors: " << DBentry.description.rows << "\t# of Dimensions for descriptor: " << DBentry.description.cols
// << "\tType/depth: " << DBentry.description.type() << " | " << DBentry.description.depth() << endl;
descriptorDatabase.push_back(DBentry.description);
DB.push_back( DBentry );
}
}
}
// Add Database to matcher program.
matcher->add(descriptorDatabase);
matcher->train();
tm.stop();
cout << "End reading directory in " << tm.getTimeMilli() << " ms, of size " << DB.size() << endl;
return true;
}
Example 6: main
//......... some code omitted here .........
}
(image.empty() ? frame : image).copyTo(frame_cpu);
frame_gpu.upload(image.empty() ? frame : image);
convertAndResize(frame_gpu, gray_gpu, resized_gpu, scaleFactor);
convertAndResize(frame_cpu, gray_cpu, resized_cpu, scaleFactor);
TickMeter tm;
tm.start();
if (useGPU)
{
cascade_gpu.visualizeInPlace = true;
cascade_gpu.findLargestObject = findLargestObject;
detections_num = cascade_gpu.detectMultiScale(resized_gpu, facesBuf_gpu, 1.2,
(filterRects || findLargestObject) ? 4 : 0);
facesBuf_gpu.colRange(0, detections_num).download(faces_downloaded);
}
else
{
Size minSize = cascade_gpu.getClassifierSize();
cascade_cpu.detectMultiScale(resized_cpu, facesBuf_cpu, 1.2,
(filterRects || findLargestObject) ? 4 : 0,
(findLargestObject ? CV_HAAR_FIND_BIGGEST_OBJECT : 0)
| CV_HAAR_SCALE_IMAGE,
minSize);
detections_num = (int)facesBuf_cpu.size();
}
if (!useGPU && detections_num)
{
for (int i = 0; i < detections_num; ++i)
{
rectangle(resized_cpu, facesBuf_cpu[i], Scalar(255));
}
}
if (useGPU)
{
resized_gpu.download(resized_cpu);
}
tm.stop();
double detectionTime = tm.getTimeMilli();
double fps = 1000 / detectionTime;
//print detections to console
cout << setfill(' ') << setprecision(2);
cout << setw(6) << fixed << fps << " FPS, " << detections_num << " det";
if ((filterRects || findLargestObject) && detections_num > 0)
{
Rect *faceRects = useGPU ? faces_downloaded.ptr<Rect>() : &facesBuf_cpu[0];
for (int i = 0; i < min(detections_num, 2); ++i)
{
cout << ", [" << setw(4) << faceRects[i].x
<< ", " << setw(4) << faceRects[i].y
<< ", " << setw(4) << faceRects[i].width
<< ", " << setw(4) << faceRects[i].height << "]";
}
}
cout << endl;
cvtColor(resized_cpu, frameDisp, CV_GRAY2BGR);
displayState(frameDisp, helpScreen, useGPU, findLargestObject, filterRects, fps);
imshow("result", frameDisp);
char key = (char)waitKey(5);
if (key == 27)
{
break;
}
switch (key)
{
case ' ':
useGPU = !useGPU;
break;
case 'm':
case 'M':
findLargestObject = !findLargestObject;
break;
case 'f':
case 'F':
filterRects = !filterRects;
break;
case '1':
scaleFactor *= 1.05;
break;
case 'q':
case 'Q':
scaleFactor /= 1.05;
break;
case 'h':
case 'H':
helpScreen = !helpScreen;
break;
}
}
Example 7: main
int main(int argc, char** argv)
{
if (argc != 3)
{
cerr << "Usage: stereo_multi_gpu <left_video> <right_video>" << endl;
return -1;
}
const int numDevices = getCudaEnabledDeviceCount();
if (numDevices != 2)
{
cerr << "Two GPUs are required" << endl;
return -1;
}
for (int i = 0; i < numDevices; ++i)
{
DeviceInfo devInfo(i);
if (!devInfo.isCompatible())
{
cerr << "CUDA module was't built for GPU #" << i << " ("
<< devInfo.name() << ", CC " << devInfo.majorVersion()
<< devInfo.minorVersion() << endl;
return -1;
}
printShortCudaDeviceInfo(i);
}
VideoCapture leftVideo(argv[1]);
VideoCapture rightVideo(argv[2]);
if (!leftVideo.isOpened())
{
cerr << "Can't open " << argv[1] << " video file" << endl;
return -1;
}
if (!rightVideo.isOpened())
{
cerr << "Can't open " << argv[2] << " video file" << endl;
return -1;
}
cout << endl;
cout << "This sample demonstrates working on one piece of data using two GPUs." << endl;
cout << "It splits input into two parts and processes them separately on different GPUs." << endl;
cout << endl;
Mat leftFrame, rightFrame;
CudaMem leftGrayFrame, rightGrayFrame;
StereoSingleGpu gpu0Alg(0);
StereoSingleGpu gpu1Alg(1);
StereoMultiGpuThread multiThreadAlg;
StereoMultiGpuStream multiStreamAlg;
Mat disparityGpu0;
Mat disparityGpu1;
Mat disparityMultiThread;
CudaMem disparityMultiStream;
Mat disparityGpu0Show;
Mat disparityGpu1Show;
Mat disparityMultiThreadShow;
Mat disparityMultiStreamShow;
TickMeter tm;
cout << "-------------------------------------------------------------------" << endl;
cout << "| Frame | GPU 0 ms | GPU 1 ms | Multi Thread ms | Multi Stream ms |" << endl;
cout << "-------------------------------------------------------------------" << endl;
for (int i = 0;; ++i)
{
leftVideo >> leftFrame;
rightVideo >> rightFrame;
if (leftFrame.empty() || rightFrame.empty())
break;
if (leftFrame.size() != rightFrame.size())
{
cerr << "Frames have different sizes" << endl;
return -1;
}
leftGrayFrame.create(leftFrame.size(), CV_8UC1);
rightGrayFrame.create(leftFrame.size(), CV_8UC1);
cvtColor(leftFrame, leftGrayFrame.createMatHeader(), COLOR_BGR2GRAY);
cvtColor(rightFrame, rightGrayFrame.createMatHeader(), COLOR_BGR2GRAY);
tm.reset(); tm.start();
gpu0Alg.compute(leftGrayFrame.createMatHeader(), rightGrayFrame.createMatHeader(),
disparityGpu0);
tm.stop();
const double gpu0Time = tm.getTimeMilli();
//......... some code omitted here .........
Example 8: run
void App::run(int argc, char **argv)
{
parseCmdArgs(argc, argv);
if (help_showed)
return;
if (getCudaEnabledDeviceCount() == 0)
throw runtime_error("No GPU found or the library is compiled without GPU support");
if (cascade_name.empty())
{
cout << "Using default cascade file...\n";
cascade_name = "data/face_detect/haarcascade_frontalface_alt.xml";
}
if (!cascade_gpu.load(cascade_name) || !cascade_cpu.load(cascade_name))
{
stringstream msg;
msg << "Could not load cascade classifier \"" << cascade_name << "\"";
throw runtime_error(msg.str());
}
if (sources.size() != 1)
{
cout << "Loading default frames source...\n";
sources.resize(1);
sources[0] = new VideoSource("data/face_detect/browser.flv");
}
Mat frame, frame_cpu, gray_cpu, resized_cpu, faces_downloaded, frameDisp;
vector<Rect> facesBuf_cpu;
GpuMat frame_gpu, gray_gpu, resized_gpu, facesBuf_gpu;
int detections_num;
while (!exited)
{
sources[0]->next(frame_cpu);
frame_gpu.upload(frame_cpu);
convertAndResize(frame_gpu, gray_gpu, resized_gpu, scaleFactor);
convertAndResize(frame_cpu, gray_cpu, resized_cpu, scaleFactor);
TickMeter tm;
tm.start();
if (useGPU)
{
cascade_gpu.visualizeInPlace = true;
cascade_gpu.findLargestObject = findLargestObject;
detections_num = cascade_gpu.detectMultiScale(resized_gpu, facesBuf_gpu, 1.2,
(filterRects || findLargestObject) ? 4 : 0);
facesBuf_gpu.colRange(0, detections_num).download(faces_downloaded);
}
else
{
Size minSize = cascade_gpu.getClassifierSize();
cascade_cpu.detectMultiScale(resized_cpu, facesBuf_cpu, 1.2,
(filterRects || findLargestObject) ? 4 : 0,
(findLargestObject ? CV_HAAR_FIND_BIGGEST_OBJECT : 0)
| CV_HAAR_SCALE_IMAGE,
minSize);
detections_num = (int)facesBuf_cpu.size();
}
if (!useGPU && detections_num)
{
for (int i = 0; i < detections_num; ++i)
{
rectangle(resized_cpu, facesBuf_cpu[i], Scalar(255));
}
}
if (useGPU)
{
resized_gpu.download(resized_cpu);
}
tm.stop();
double detectionTime = tm.getTimeMilli();
double fps = 1000 / detectionTime;
/*//print detections to console
cout << setfill(' ') << setprecision(2);
cout << setw(6) << fixed << fps << " FPS, " << detections_num << " det";
if ((filterRects || findLargestObject) && detections_num > 0)
{
Rect *faceRects = useGPU ? faces_downloaded.ptr<Rect>() : &facesBuf_cpu[0];
for (int i = 0; i < min(detections_num, 2); ++i)
{
cout << ", [" << setw(4) << faceRects[i].x
<< ", " << setw(4) << faceRects[i].y
<< ", " << setw(4) << faceRects[i].width
<< ", " << setw(4) << faceRects[i].height << "]";
}
}
cout << endl;*/
cvtColor(resized_cpu, frameDisp, CV_GRAY2BGR);
//......... some code omitted here .........
Example 9: bingQdpmRocTest
void bingQdpmRocTest(vector<string> &dirs,
int windowLimit = -1, double timeLimitMs = -1, float ratioThreshold = -1)
{
size_t imageCount = 0;
size_t personCount = 0;
size_t matchCount = 0;
vector<ScoreTp> pScores;
TickMeter tm;
vector<std::string>::const_iterator it = dirs.begin();
char buf[512];
for (; it != dirs.end(); it++) {
string dir = *it;
DataSetVOC voc(dir, true, true);
voc.loadAnnotations();
const size_t testNum = voc.testSet.size();
const char *imgPath = _S(voc.imgPathW);
// Objectness
double base = 2;
double intUionThr = 0.5;
int W = 8;
int NSS = 2;
#ifdef WINDOW_GUESS
Objectness objNess(voc, base, intUionThr, W, NSS);
objNess.loadTrainedModel(TRAIN_MODEL);
#endif
// LSVM DPM
string dpmPersonModel = "../ExtraData/latentsvmXml/person.xml";
vector<string> models;
models.push_back(dpmPersonModel);
QUniLsvmDetector detector(models);
float overlapThreshold = 0.2f;
if (ratioThreshold > 0)
detector.setRatioThreshold(ratioThreshold);
printf("%d: \n", testNum);
for (int i = 0; i < testNum; i++) {
const vector<Vec4i> &boxesGT = voc.gtTestBoxes[i];
const size_t gtNumCrnt = boxesGT.size();
if (gtNumCrnt <= 0)
continue;
imageCount++;
personCount += gtNumCrnt;
Mat image = imread(format(imgPath, _S(voc.testSet[i])));
if (image.ptr() == NULL) {
fprintf(stderr, "No JPG Image !\n");
exit(1);
}
int numPerSz = 130;
ValStructVec<float, Vec4i> boxes;
double preObj = tm.getTimeMilli(); // the meter accumulates across runs; snapshot the total so far
double objTime = 0.;
#ifdef WINDOW_GUESS // window guess
tm.start();
objNess.getObjBndBoxes(image, boxes, numPerSz);
tm.stop();
objTime = tm.getTimeMilli() - preObj;
#endif
double localTimeLimitMs = timeLimitMs;
if (timeLimitMs > 0) {
localTimeLimitMs -= objTime;
if (localTimeLimitMs < 0.)
localTimeLimitMs = 0.;
}
vector<QRect> searchBoxes;
if (windowLimit > 0) {
for (int j = 0; j < (int)boxes.size() && j < windowLimit; j++) {
const Vec4i &bb = boxes[j];
QRect rt(bb[0], bb[1], bb[2], bb[3]);
searchBoxes.push_back(rt);
}
} else {
for (int j = 0; j < (int)boxes.size(); j++) {
const Vec4i &bb = boxes[j];
QRect rt(bb[0], bb[1], bb[2], bb[3]);
searchBoxes.push_back(rt);
}
}
tm.start();
detector.setup(image, overlapThreshold, localTimeLimitMs);
tm.stop();
vector<FeatureMapCoord> ftrMapCoords;
#ifdef WINDOW_GUESS
detector.cvtBox2FtrMapCoord(&searchBoxes, &ftrMapCoords);
#else
detector.genFullFtrMapCoord(&ftrMapCoords);
//......... some code omitted here .........
Example 10: main
//......... some code omitted here .........
cuda::flip(frame_gpu,frame_gpu,1);
cv::flip(image,image,1);
cuda::cvtColor(frame_gpu,k_rgb_gpu,CV_BGRA2BGR);
convertAndResizeGPU(k_rgb_gpu, gray_gpu, resized_gpu, scaleFactor);
convertAndResizeCPU(image,image,scaleFactor);
TickMeter tm;
tm.start();
//cascade_gpu->setMaxNumObjects(2);
//cascade_gpu->setMaxObjectSize(cv::Size(224,224));
//cascade_gpu->setMinObjectSize(cv::Size(0,0));
cascade_gpu->setFindLargestObject(findLargestObject);
cascade_gpu->setScaleFactor(1.2);
cascade_gpu->setMinNeighbors((filterRects || findLargestObject) ? 4 : 0);
cascade_gpu->detectMultiScale(resized_gpu, facesBuf_gpu);
cascade_gpu->convert(facesBuf_gpu, faces);
for (size_t i = 0; i < faces.size(); ++i)
{
//cout<< "object [" << i << "]: " << faces[i].width << " x " << faces[i].height <<endl;
rectangle(image, faces[i], Scalar(255));
cropRect = Rect(image.cols / 2, image.rows / 2,224,224);
Mat cropImg = image(cropRect).clone();
if(predictObject == true)
{
std::vector<Prediction> predictions = CaffeClassifier.Classify(cropImg,1);
/* Print the top N predictions. */
for (size_t i = 0; i < predictions.size(); ++i)
{
Prediction p = predictions[i];
std::cout << std::fixed << std::setprecision(4) << p.second << " - \"" << p.first << "\"" << std::endl;
}
predictObject = false;
}
}
tm.stop();
double detectionTime = tm.getTimeMilli();
double fps = 1000 / detectionTime;
displayState(image, helpScreen, useGPU, findLargestObject, filterRects, fps,scaleFactor);
imshow("result", image);
char key = (char)waitKey(5);
if (key == 27)
{
break;
}
switch (key)
{
case ' ':
useGPU = !useGPU;
break;
case 'm':
case 'M':
findLargestObject = !findLargestObject;
break;
case 'f':
case 'F':
filterRects = !filterRects;
break;
case '1':
scaleFactor *= 1.05;
break;
case 'q':
case 'Q':
scaleFactor /= 1.05;
break;
case 'h':
case 'H':
helpScreen = !helpScreen;
break;
case 'p':
case 'P':
predictObject = !predictObject;
break;
}
protonect_shutdown = protonect_shutdown || (key > 0 && ((key & 0xFF) == 27)); // shutdown on escape
listener.release(frames);
//libfreenect2::this_thread::sleep_for(libfreenect2::chrono::milliseconds(100));
}
resized_gpu.release();
// TODO: restarting ir stream doesn't work!
// TODO: bad things will happen, if frame listeners are freed before dev->stop() :(
dev->stop();
dev->close();
delete registration;
return 0;
}
Example 11: main
//......... some code omitted here .........
Mat image = loadImage(imageName);
Ptr<GeneralizedHough> alg;
if (!full)
{
Ptr<GeneralizedHoughBallard> ballard = useGpu ? cuda::createGeneralizedHoughBallard() : createGeneralizedHoughBallard();
ballard->setMinDist(minDist);
ballard->setLevels(levels);
ballard->setDp(dp);
ballard->setMaxBufferSize(maxBufSize);
ballard->setVotesThreshold(votesThreshold);
alg = ballard;
}
else
{
Ptr<GeneralizedHoughGuil> guil = useGpu ? cuda::createGeneralizedHoughGuil() : createGeneralizedHoughGuil();
guil->setMinDist(minDist);
guil->setLevels(levels);
guil->setDp(dp);
guil->setMaxBufferSize(maxBufSize);
guil->setMinAngle(minAngle);
guil->setMaxAngle(maxAngle);
guil->setAngleStep(angleStep);
guil->setAngleThresh(angleThresh);
guil->setMinScale(minScale);
guil->setMaxScale(maxScale);
guil->setScaleStep(scaleStep);
guil->setScaleThresh(scaleThresh);
guil->setPosThresh(posThresh);
alg = guil;
}
vector<Vec4f> position;
TickMeter tm;
if (useGpu)
{
cuda::GpuMat d_templ(templ);
cuda::GpuMat d_image(image);
cuda::GpuMat d_position;
alg->setTemplate(d_templ);
tm.start();
alg->detect(d_image, d_position);
d_position.download(position);
tm.stop();
}
else
{
alg->setTemplate(templ);
tm.start();
alg->detect(image, position);
tm.stop();
}
cout << "Found : " << position.size() << " objects" << endl;
cout << "Detection time : " << tm.getTimeMilli() << " ms" << endl;
Mat out;
cv::cvtColor(image, out, COLOR_GRAY2BGR);
for (size_t i = 0; i < position.size(); ++i)
{
Point2f pos(position[i][0], position[i][1]);
float scale = position[i][2];
float angle = position[i][3];
RotatedRect rect;
rect.center = pos;
rect.size = Size2f(templ.cols * scale, templ.rows * scale);
rect.angle = angle;
Point2f pts[4];
rect.points(pts);
line(out, pts[0], pts[1], Scalar(0, 0, 255), 3);
line(out, pts[1], pts[2], Scalar(0, 0, 255), 3);
line(out, pts[2], pts[3], Scalar(0, 0, 255), 3);
line(out, pts[3], pts[0], Scalar(0, 0, 255), 3);
}
imshow("out", out);
waitKey();
return 0;
}
Example 12: main
int main(int argc, char **argv)
{
CommandLineParser parser(argc, argv, keys);
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
String modelFile = parser.get<String>("model");
String imageFile = parser.get<String>("image");
if (!parser.check())
{
parser.printErrors();
return 0;
}
String classNamesFile = parser.get<String>("c_names");
String resultFile = parser.get<String>("result");
//! [Read model and initialize network]
dnn::Net net = dnn::readNetFromTorch(modelFile);
//! [Prepare blob]
Mat img = imread(imageFile), input;
if (img.empty())
{
std::cerr << "Can't read image from the file: " << imageFile << std::endl;
exit(-1);
}
Size origSize = img.size();
Size inputImgSize = cv::Size(1024, 512);
if (inputImgSize != origSize)
resize(img, img, inputImgSize); //Resize image to input size
Mat inputBlob = blobFromImage(img, 1./255); //Convert Mat to image batch
//! [Prepare blob]
//! [Set input blob]
net.setInput(inputBlob, ""); //set the network input
//! [Set input blob]
TickMeter tm;
String oBlob = net.getLayerNames().back();
if (!parser.get<String>("o_blob").empty())
{
oBlob = parser.get<String>("o_blob");
}
//! [Make forward pass]
tm.start();
Mat result = net.forward(oBlob);
tm.stop();
if (!resultFile.empty()) {
CV_Assert(result.isContinuous());
ofstream fout(resultFile.c_str(), ios::out | ios::binary);
fout.write((char*)result.data, result.total() * sizeof(float));
fout.close();
}
std::cout << "Output blob: " << result.size[0] << " x " << result.size[1] << " x " << result.size[2] << " x " << result.size[3] << "\n";
std::cout << "Inference time, ms: " << tm.getTimeMilli() << std::endl;
if (parser.has("show"))
{
std::vector<String> classNames;
vector<cv::Vec3b> colors;
if(!classNamesFile.empty()) {
colors = readColors(classNamesFile, classNames);
}
Mat segm, legend;
colorizeSegmentation(result, segm, legend, classNames, colors);
Mat show;
addWeighted(img, 0.1, segm, 0.9, 0.0, show);
cv::resize(show, show, origSize, 0, 0, cv::INTER_NEAREST);
imshow("Result", show);
if(classNames.size())
imshow("Legend", legend);
waitKey();
}
return 0;
} //main
Example 13: matchObsvToDB
void ObjectRecognition::matchObsvToDB(const Mat &img, string& ObjName)
{
//img.assignTo(img, CV_8U);
vector<DMatch> matches;
vector<vector<DMatch> > total_matches;
TickMeter tm;
tm.start();
vector<KeyPoint> imgKp;
Mat imgDesc;
detectKeypointsSingle(img, imgKp);
computeDescriptorsSingle(img, imgKp, imgDesc);
matcher->match( imgDesc, matches );
/*/
//Match each item in database to pic (problem is it then matches to best keypoint and need to find a way to see which image in database is best)
// I tried variance of distances but that wasn't reliable, didn't try finding var of angle but probably wouldn't be reliable either
for ( vector<DBobj>::iterator DBiter = DB.begin() ; DBiter != DB.end(); DBiter++ )
{
matcher->match( imgDesc, DBiter->description, matches);
total_matches.push_back(matches);
float mean = 0, var = 0;
for (vector<DMatch>::iterator DMiter = matches.begin(); DMiter != matches.end(); DMiter++) mean += DMiter->distance;
mean = mean / matches.size();
for (vector<DMatch>::iterator DMiter = matches.begin(); DMiter != matches.end(); DMiter++) var += (DMiter->distance - mean) * (DMiter->distance - mean);
cout << "# of Observed Matches to " << DBiter->name << " is " << matches.size() << " with a sd of: " << var << endl;
} /*/
tm.stop();
double matchTime = tm.getTimeMilli();
//for finding which picture has the most matches (std::vector replaces the non-standard variable-length array)
vector<int> numMatchesToDB((int)DB.size(), 0);
int bestMatchIdx = 0;
//bin for finding which pic has most matches
for (vector<DMatch>::iterator DMiter = matches.begin(); DMiter != matches.end(); DMiter++)
{
numMatchesToDB[DMiter->imgIdx]++;
if ( numMatchesToDB[bestMatchIdx] < numMatchesToDB[DMiter->imgIdx] ) bestMatchIdx = DMiter->imgIdx;
//cout << "bestMatchIdx: " << numMatchesToDB[bestMatchIdx] << "\t" << numMatchesToDB[DMiter->imgIdx] << "\t" << bestMatchIdx << "\t" << DMiter->imgIdx << endl;
//cout << "Match information (queryIDx/trainIDx/imgIDx/distance): " <<
// DMiter->queryIdx << "\t" << DMiter->trainIdx << "\t" << DMiter->imgIdx << "\t" << DMiter->distance << endl;
}
cout << "Match time: " << matchTime << " ms with the best match at " << DB.at(bestMatchIdx).name << " with " << numMatchesToDB[bestMatchIdx] << " matching keypoints" << endl;
//cout << "Observed Descriptors " << imgDesc.rows << " and number of matches " << (int)matches.size() << endl;
CV_Assert( imgDesc.rows == (int)matches.size() || matches.empty() );
ObjName = DB.at(bestMatchIdx).name;
//*/ Show only bestMatchIdx pic
//preparing mask so not all keypoints are shown, only links where imgIdx (DB image position)
vector<char> mask;
mask.resize( matches.size() );
fill( mask.begin(), mask.end(), 0 );
for( size_t i = 0; i < matches.size(); i++ )
{
if( matches[i].imgIdx == bestMatchIdx )
mask[i] = 1;
}
Mat drawImg;
drawMatches( img, imgKp, DB.at(bestMatchIdx).img, DB.at(bestMatchIdx).keypoints, matches, drawImg, Scalar(255, 0, 0), Scalar(0, 255, 255), mask );
imshow(DB.at(bestMatchIdx).name, drawImg);
waitKey();
//*/
/*/ Show each match by pic
bool running = true;
Mat drawImg;
vector<char> mask;
vector<DBobj>::iterator DBiter = DB.begin();
for( size_t i = 0; running ; )
{
maskMatchesByTrainImgIdx( matches, (int)i, mask );
drawMatches( img, imgKp, DBiter->img, DBiter->keypoints, matches, drawImg, Scalar(255, 0, 0), Scalar(0, 255, 255), mask );
imshow("Matchs", drawImg);
switch ( (char) waitKey(5))
{
case 'q': case 'Q':
running = false;
break;
case 'i': case 'I':
//cout << (bool) DBiter != DB.end() << endl;
if (( DBiter != DB.end() ) && ( i < DB.size()-1 ))
//......... some code omitted here .........
Example 14: main
int main(int argc, const char* argv[])
{
if (argc != 2)
{
std::cerr << "Usage : video_writer <input video file>" << std::endl;
return -1;
}
const double FPS = 25.0;
cv::VideoCapture reader(argv[1]);
if (!reader.isOpened())
{
std::cerr << "Can't open input video file" << std::endl;
return -1;
}
cv::cuda::printShortCudaDeviceInfo(cv::cuda::getDevice());
cv::VideoWriter writer;
cv::Ptr<cv::cudacodec::VideoWriter> d_writer;
cv::Mat frame;
cv::cuda::GpuMat d_frame;
std::vector<double> cpu_times;
std::vector<double> gpu_times;
TickMeter tm;
for (int i = 1;; ++i)
{
std::cout << "Read " << i << " frame" << std::endl;
reader >> frame;
if (frame.empty())
{
std::cout << "Stop" << std::endl;
break;
}
if (!writer.isOpened())
{
std::cout << "Frame Size : " << frame.cols << "x" << frame.rows << std::endl;
std::cout << "Open CPU Writer" << std::endl;
if (!writer.open("output_cpu.avi", cv::VideoWriter::fourcc('X', 'V', 'I', 'D'), FPS, frame.size()))
return -1;
}
if (d_writer.empty())
{
std::cout << "Open CUDA Writer" << std::endl;
const cv::String outputFilename = "output_gpu.avi";
d_writer = cv::cudacodec::createVideoWriter(outputFilename, frame.size(), FPS);
}
d_frame.upload(frame);
std::cout << "Write " << i << " frame" << std::endl;
tm.reset(); tm.start();
writer.write(frame);
tm.stop();
cpu_times.push_back(tm.getTimeMilli());
tm.reset(); tm.start();
d_writer->write(d_frame);
tm.stop();
gpu_times.push_back(tm.getTimeMilli());
}
std::cout << std::endl << "Results:" << std::endl;
std::sort(cpu_times.begin(), cpu_times.end());
std::sort(gpu_times.begin(), gpu_times.end());
double cpu_avg = std::accumulate(cpu_times.begin(), cpu_times.end(), 0.0) / cpu_times.size();
double gpu_avg = std::accumulate(gpu_times.begin(), gpu_times.end(), 0.0) / gpu_times.size();
std::cout << "CPU [XVID] : Avg : " << cpu_avg << " ms FPS : " << 1000.0 / cpu_avg << std::endl;
std::cout << "GPU [H264] : Avg : " << gpu_avg << " ms FPS : " << 1000.0 / gpu_avg << std::endl;
return 0;
}
Example 15: main
/** @function main */
int main(int argc, char** argv)
{
TickMeter tm;
string detectorType = defaultDetectorType;
string descriptorType = defaultDescriptorType;
string matcherType = defaultMatcherType;
string queryFileName = defaultQueryFileName;
string trainFileName = defaultTrainFileName;
if(argc != 1 && argc != 4 && argc != 6)
{
readme(argv[0]);
return -1;
}
std::cout << argc << std::endl;
if(argc != 1)
{
detectorType = argv[1];
descriptorType = argv[2];
matcherType = argv[3];
if(argc != 4)
{
queryFileName = argv[4];
trainFileName = argv[5];
}
}
Mat trainImage = imread(trainFileName, CV_LOAD_IMAGE_GRAYSCALE);
Mat queryImage = imread(queryFileName, CV_LOAD_IMAGE_GRAYSCALE);
if(!trainImage.data || !queryImage.data)
{
std::cout << " --(!) Error reading images " << std::endl;
return -1;
}
//Create Detector Phase
Ptr<FeatureDetector> featureDetector;
Ptr<DescriptorExtractor> descriptorExtractor;
Ptr<DescriptorMatcher> descriptorMatcher;
initModule_nonfree();
if(!createDetectorDescriptorMatcher(detectorType, descriptorType, matcherType, featureDetector, descriptorExtractor, descriptorMatcher))
{
readme(argv[0]);
return -1;
}
//get keypoints phase
vector<KeyPoint> queryKeypoints;
vector<KeyPoint> trainKeypoints;
tm.start();
detectKeypoints(queryImage, queryKeypoints, featureDetector);
detectKeypoints(trainImage, trainKeypoints, featureDetector);
tm.stop();
double keypointTime = tm.getTimeMilli();
//get descriptor phase
Mat queryDescriptors;
Mat trainDescriptors;
tm.start();
computeDescriptors(queryImage, queryKeypoints, queryDescriptors, descriptorExtractor);
//computeDescriptors(trainImage, trainKeypoints, trainDescriptors, descriptorExtractor);
cv::FileStorage fs2("data.xml", cv::FileStorage::READ);
fs2["trainDescriptors"] >> trainDescriptors;
fs2.release();
tm.stop();
double descriptorTime = tm.getTimeMilli();
//matching Phase
vector<DMatch> matches;
tm.start();
matchDescriptors(trainDescriptors, queryDescriptors, matches, descriptorMatcher);
tm.stop();
double matcherTime = tm.getTimeMilli();
//show result Phase
double max_dist = 0; double min_dist = 100;
for( int i = 0; i < trainDescriptors.rows; i++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
vector<DMatch> good_matches;
for( int i = 0; i < trainDescriptors.rows; i++ )
{
if( matches[i].distance < 3*min_dist )
{
good_matches.push_back( matches[i]);
}
}
Mat matchesImage = showResultImages(trainImage, trainKeypoints, queryImage, queryKeypoints, matches);
Mat goodmatchesImage = showResultImages(trainImage, trainKeypoints, queryImage, queryKeypoints, good_matches);
//......... some code omitted here .........