本文整理汇总了C++中TickMeter::reset方法的典型用法代码示例。如果您正苦于以下问题:C++ TickMeter::reset方法的具体用法?C++ TickMeter::reset怎么用?C++ TickMeter::reset使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类TickMeter
的用法示例。
在下文中一共展示了TickMeter::reset方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
int main(int argc, const char* argv[])
{
if (argc != 2)
return -1;
const std::string fname(argv[1]);
cv::namedWindow("CPU", cv::WINDOW_NORMAL);
cv::namedWindow("GPU", cv::WINDOW_OPENGL);
cv::cuda::setGlDevice();
cv::Mat frame;
cv::VideoCapture reader(fname);
cv::cuda::GpuMat d_frame;
cv::Ptr<cv::cudacodec::VideoReader> d_reader = cv::cudacodec::createVideoReader(fname);
TickMeter tm;
std::vector<double> cpu_times;
std::vector<double> gpu_times;
for (;;)
{
tm.reset();
tm.start();
if (!reader.read(frame))
break;
tm.stop();
cpu_times.push_back(tm.getTimeMilli());
tm.reset();
tm.start();
if (!d_reader->nextFrame(d_frame))
break;
tm.stop();
gpu_times.push_back(tm.getTimeMilli());
cv::imshow("CPU", frame);
cv::imshow("GPU", d_frame);
if (cv::waitKey(3) > 0)
break;
}
if (!cpu_times.empty() && !gpu_times.empty())
{
std::cout << std::endl << "Results:" << std::endl;
std::sort(cpu_times.begin(), cpu_times.end());
std::sort(gpu_times.begin(), gpu_times.end());
double cpu_avg = std::accumulate(cpu_times.begin(), cpu_times.end(), 0.0) / cpu_times.size();
double gpu_avg = std::accumulate(gpu_times.begin(), gpu_times.end(), 0.0) / gpu_times.size();
std::cout << "CPU : Avg : " << cpu_avg << " ms FPS : " << 1000.0 / cpu_avg << std::endl;
std::cout << "GPU : Avg : " << gpu_avg << " ms FPS : " << 1000.0 / gpu_avg << std::endl;
}
return 0;
}
示例2: main
// Entry point for the stereo multi-GPU sample: runs stereo matching on a
// left/right video pair on each GPU separately, then with two host threads,
// then with two CUDA streams, printing per-frame timings.
// NOTE: the body below is truncated by the source aggregator — the tail of
// the timing loop and the epilogue are not shown here.
int main(int argc, char** argv)
{
// Require exactly two arguments: left and right input videos.
if (argc != 3)
{
cerr << "Usage: stereo_multi_gpu <left_video> <right_video>" << endl;
return -1;
}
// This sample is hard-wired to exactly two CUDA devices.
const int numDevices = getCudaEnabledDeviceCount();
if (numDevices != 2)
{
cerr << "Two GPUs are required" << endl;
return -1;
}
// Both devices must be compatible with the compiled CUDA module.
for (int i = 0; i < numDevices; ++i)
{
DeviceInfo devInfo(i);
if (!devInfo.isCompatible())
{
// NOTE(review): message typo "was't" — should read "wasn't"; left as-is
// because this block is truncated and only comments are changed here.
cerr << "CUDA module was't built for GPU #" << i << " ("
<< devInfo.name() << ", CC " << devInfo.majorVersion()
<< devInfo.minorVersion() << endl;
return -1;
}
printShortCudaDeviceInfo(i);
}
VideoCapture leftVideo(argv[1]);
VideoCapture rightVideo(argv[2]);
if (!leftVideo.isOpened())
{
cerr << "Can't open " << argv[1] << " video file" << endl;
return -1;
}
if (!rightVideo.isOpened())
{
cerr << "Can't open " << argv[2] << " video file" << endl;
return -1;
}
cout << endl;
cout << "This sample demonstrates working on one piece of data using two GPUs." << endl;
cout << "It splits input into two parts and processes them separately on different GPUs." << endl;
cout << endl;
Mat leftFrame, rightFrame;
// CudaMem buffers give page-locked host memory for fast GPU transfers.
CudaMem leftGrayFrame, rightGrayFrame;
// Four algorithm variants compared by this sample (types defined elsewhere
// in the original sample file).
StereoSingleGpu gpu0Alg(0);
StereoSingleGpu gpu1Alg(1);
StereoMultiGpuThread multiThreadAlg;
StereoMultiGpuStream multiStreamAlg;
// Raw disparity outputs, one per variant.
Mat disparityGpu0;
Mat disparityGpu1;
Mat disparityMultiThread;
CudaMem disparityMultiStream;
// Display copies of the disparities.
Mat disparityGpu0Show;
Mat disparityGpu1Show;
Mat disparityMultiThreadShow;
Mat disparityMultiStreamShow;
TickMeter tm;
// Timing table header.
cout << "-------------------------------------------------------------------" << endl;
cout << "| Frame | GPU 0 ms | GPU 1 ms | Multi Thread ms | Multi Stream ms |" << endl;
cout << "-------------------------------------------------------------------" << endl;
// Per-frame loop: read, validate, convert to gray, then time each variant.
for (int i = 0;; ++i)
{
leftVideo >> leftFrame;
rightVideo >> rightFrame;
// Either stream running out of frames ends the benchmark.
if (leftFrame.empty() || rightFrame.empty())
break;
if (leftFrame.size() != rightFrame.size())
{
cerr << "Frames have different sizes" << endl;
return -1;
}
leftGrayFrame.create(leftFrame.size(), CV_8UC1);
rightGrayFrame.create(leftFrame.size(), CV_8UC1);
cvtColor(leftFrame, leftGrayFrame.createMatHeader(), COLOR_BGR2GRAY);
cvtColor(rightFrame, rightGrayFrame.createMatHeader(), COLOR_BGR2GRAY);
// Variant 1: stereo on GPU 0 only.
tm.reset(); tm.start();
gpu0Alg.compute(leftGrayFrame.createMatHeader(), rightGrayFrame.createMatHeader(),
disparityGpu0);
tm.stop();
const double gpu0Time = tm.getTimeMilli();
//......... part of the code omitted here .........
示例3: main
//......... part of the code omitted here .........
// (Fragment of a larger main(): loads previously stored keypoints and
// descriptors for each image from YAML files, then analyzes a query image.
// Variables such as kpfn, ID, the *map containers, imagemap, image_name and
// divs are declared in the omitted portion above — TODO confirm against the
// full sample.)
kpfn = kpfn+ ".yml";
// Create filestorage item to read from and add to map.
FileStorage store(kpfn, cv::FileStorage::READ);
// Read the four keypoint sets saved for this image.
FileNode n1 = store["SurfKeypoints"];
read(n1,SurfKeypoints);
FileNode n2 = store["SiftKeypoints"];
read(n2,SiftKeypoints);
FileNode n3 = store["OrbKeypoints"];
read(n3,OrbKeypoints);
FileNode n4 = store["FastKeypoints"];
read(n4,FastKeypoints);
// Read the matching descriptor matrices.
FileNode n5 = store["SurfDescriptors"];
read(n5,SurfDescriptors);
FileNode n6 = store["SiftDescriptors"];
read(n6,SiftDescriptors);
FileNode n7 = store["OrbDescriptors"];
read(n7,OrbDescriptors);
FileNode n8 = store["FastDescriptors"];
read(n8,FastDescriptors);
store.release();
// Index the loaded descriptors by the image's ID vector.
surfmap[ID] = SurfDescriptors;
siftmap[ID] = SiftDescriptors;
orbmap[ID] = OrbDescriptors;
fastmap[ID] = FastDescriptors;
}
}
TickMeter tm;
tm.reset();
cout << "<\n Analyzing Images ..." << endl;
// We have a bunch of images, now we compute their grayscale and black and white.
map<vector<float>, Mat> gsmap;
map<vector<float>, Mat> bwmap;
for (map<vector<float>, Mat>::iterator i = imagemap.begin(); i != imagemap.end(); ++i)
{
vector<float> ID = i->first;
Mat Image = i-> second;
// Smooth before the block-average reduction to suppress noise.
GaussianBlur( Image, Image, Size(5,5), 0, 0, BORDER_DEFAULT );
// averageImage helpers are project-local; presumably getPixSumFromImage
// reduces the image to divs x divs block sums — TODO confirm.
gsmap[ID] = averageImage::getPixSumFromImage(Image, divs);
bwmap[ID] = averageImage::aboveBelow(gsmap[ID]);
}
// Same reduction for the query image.
Mat image = imread(image_name);
Mat gsimage = averageImage::getPixSumFromImage(image, divs);
Mat bwimage = averageImage::aboveBelow(gsimage);
// cout << gsimage <<endl;
imwrite("GS.png", gsimage);
// Show the reduced query image and wait for a key press.
namedWindow("GSIMAGE (Line 319)");
imshow("GSIMAGE (Line 319)", gsimage);
waitKey(0);
// Feature containers for the query image (fragment is truncated below;
// the FAST descriptor matrix and everything after it are not shown).
vector<KeyPoint> imgSurfKeypoints;
vector<KeyPoint> imgSiftKeypoints;
vector<KeyPoint> imgOrbKeypoints;
vector<KeyPoint> imgFastKeypoints;
Mat imgSurfDescriptors;
Mat imgSiftDescriptors;
Mat imgOrbDescriptors;
示例4: main
int main(int argc, const char* argv[])
{
if (argc != 2)
{
std::cerr << "Usage : video_writer <input video file>" << std::endl;
return -1;
}
const double FPS = 25.0;
cv::VideoCapture reader(argv[1]);
if (!reader.isOpened())
{
std::cerr << "Can't open input video file" << std::endl;
return -1;
}
cv::cuda::printShortCudaDeviceInfo(cv::cuda::getDevice());
cv::VideoWriter writer;
cv::Ptr<cv::cudacodec::VideoWriter> d_writer;
cv::Mat frame;
cv::cuda::GpuMat d_frame;
std::vector<double> cpu_times;
std::vector<double> gpu_times;
TickMeter tm;
for (int i = 1;; ++i)
{
std::cout << "Read " << i << " frame" << std::endl;
reader >> frame;
if (frame.empty())
{
std::cout << "Stop" << std::endl;
break;
}
if (!writer.isOpened())
{
std::cout << "Frame Size : " << frame.cols << "x" << frame.rows << std::endl;
std::cout << "Open CPU Writer" << std::endl;
if (!writer.open("output_cpu.avi", cv::VideoWriter::fourcc('X', 'V', 'I', 'D'), FPS, frame.size()))
return -1;
}
if (d_writer.empty())
{
std::cout << "Open CUDA Writer" << std::endl;
const cv::String outputFilename = "output_gpu.avi";
d_writer = cv::cudacodec::createVideoWriter(outputFilename, frame.size(), FPS);
}
d_frame.upload(frame);
std::cout << "Write " << i << " frame" << std::endl;
tm.reset(); tm.start();
writer.write(frame);
tm.stop();
cpu_times.push_back(tm.getTimeMilli());
tm.reset(); tm.start();
d_writer->write(d_frame);
tm.stop();
gpu_times.push_back(tm.getTimeMilli());
}
std::cout << std::endl << "Results:" << std::endl;
std::sort(cpu_times.begin(), cpu_times.end());
std::sort(gpu_times.begin(), gpu_times.end());
double cpu_avg = std::accumulate(cpu_times.begin(), cpu_times.end(), 0.0) / cpu_times.size();
double gpu_avg = std::accumulate(gpu_times.begin(), gpu_times.end(), 0.0) / gpu_times.size();
std::cout << "CPU [XVID] : Avg : " << cpu_avg << " ms FPS : " << 1000.0 / cpu_avg << std::endl;
std::cout << "GPU [H264] : Avg : " << gpu_avg << " ms FPS : " << 1000.0 / gpu_avg << std::endl;
return 0;
}