This article collects typical usage examples of the C++ method CPUTimer::Stop. If you are wondering what CPUTimer::Stop does and how to call it in practice, the curated examples below may help; you can also explore further usage of its containing class, CPUTimer.
Seven code examples of the CPUTimer::Stop method are shown below, sorted by popularity by default.
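Before the examples, here is a minimal sketch of the timing pattern they all share, assuming the caffe::CPUTimer interface that the examples imply (Start(), Stop(), MicroSeconds(), MilliSeconds() from caffe/util/benchmark.hpp); the function name and loop body are illustrative only, not Caffe code.
// Minimal sketch of the Start/Stop/MicroSeconds pattern used in the examples
// below. Assumes the caffe::CPUTimer interface from caffe/util/benchmark.hpp;
// the surrounding function is illustrative, not part of Caffe.
#include <glog/logging.h>
#include "caffe/util/benchmark.hpp"

void TimedLoopSketch(int batch_size) {
  caffe::CPUTimer batch_timer;
  batch_timer.Start();                  // times the whole batch
  caffe::CPUTimer timer;
  double read_time = 0, trans_time = 0;
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    timer.Start();
    // ... read one datum ...
    read_time += timer.MicroSeconds();  // elapsed microseconds for the read phase
    timer.Start();                      // restart the timer for the next phase
    // ... transform the datum ...
    trans_time += timer.MicroSeconds();
  }
  timer.Stop();
  batch_timer.Stop();                   // Stop() freezes the elapsed batch time
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}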
Example 1: CHECK
void DataLayer<Dtype>::InternalThreadEntry() {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;
double trans_time = 0;
CPUTimer timer;
CHECK(this->prefetch_data_.count());
CHECK(this->transformed_data_.count());
// Reshape according to the first datum of each batch; on single-input
// batches this allows inputs of varying dimension.
const int batch_size = this->layer_param_.data_param().batch_size();
Datum datum;
datum.ParseFromString(cursor_->value());
// Use data_transformer to infer the expected blob shape from datum.
vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
this->transformed_data_.Reshape(top_shape);
// Reshape prefetch_data according to the batch_size.
top_shape[0] = batch_size;
this->prefetch_data_.Reshape(top_shape);
Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
Dtype* top_label = NULL; // suppress warnings about uninitialized variables
if (this->output_labels_) {
top_label = this->prefetch_label_.mutable_cpu_data();
}
timer.Start();
for (int item_id = 0; item_id < batch_size; ++item_id) {
// get a datum
Datum datum;
datum.ParseFromString(cursor_->value());
read_time += timer.MicroSeconds();
timer.Start();
// Apply data transformations (mirror, scale, crop...)
int offset = this->prefetch_data_.offset(item_id);
this->transformed_data_.set_cpu_data(top_data + offset);
this->data_transformer_->Transform(datum, &(this->transformed_data_));
// Copy label.
if (this->output_labels_) {
top_label[item_id] = datum.label();
}
trans_time += timer.MicroSeconds();
timer.Start();
// go to the next item.
cursor_->Next();
if (!cursor_->valid()) {
DLOG(INFO) << "Restarting data prefetching from start.";
cursor_->SeekToFirst();
}
}
timer.Stop();
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example 2: if
void FlowDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;
double trans_time = 0;
CPUTimer timer;
CHECK(batch->data_.count());
CHECK(this->transformed_data_.count());
// Reshape according to the first datum of each batch; on single-input
// batches this allows inputs of varying dimension.
const int batch_size = this->layer_param_.flow_data_param().batch_size();
Datum& datum = *(reader_.full().peek());
// Use data_transformer to infer the expected blob shape from datum.
vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
top_shape[0] = num_test_views_;
this->transformed_data_.Reshape(top_shape);
// Reshape batch according to the batch_size.
top_shape[0] = batch_size * num_test_views_;
batch->data_.Reshape(top_shape);
Dtype* top_data = batch->data_.mutable_cpu_data();
Dtype* top_label = NULL; // suppress warnings about uninitialized variables
if (this->output_labels_) {
top_label = batch->label_.mutable_cpu_data();
}
for (int item_id = 0; item_id < batch_size; ++item_id) {
timer.Start();
// get a datum
Datum& datum = *(reader_.full().pop("Waiting for flow data"));
read_time += timer.MicroSeconds();
// DLOG(INFO) << "number of data in full queue: " << reader_.full().size();
timer.Start();
// Apply data transformations (mirror, scale, crop...)
int offset = batch->data_.offset(item_id * num_test_views_);
this->transformed_data_.set_cpu_data(top_data + offset);
if (this->phase_ == TRAIN)
this->data_transformer_->TransformVariedSizeDatum(datum, &(this->transformed_data_));
else if (this->phase_ == TEST)
this->data_transformer_->TransformVariedSizeTestDatum(datum, &(this->transformed_data_), num_test_views_);
// Copy label.
if (this->output_labels_) {
top_label[item_id] = datum.label();
}
trans_time += timer.MicroSeconds();
reader_.free().push(const_cast<Datum*>(&datum));
}
timer.Stop();
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example 3: CHECK
void SegDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
CPUTimer batch_timer;
batch_timer.Start();
double deque_time = 0;
double trans_time = 0;
CPUTimer timer;
CHECK(batch->data_.count());
CHECK(this->transformed_data_.count());
// Reshape on single-input batches to allow inputs of varying dimension.
const int batch_size = this->layer_param_.data_param().batch_size();
Dtype* top_data = batch->data_.mutable_cpu_data();
Dtype* top_label = NULL; // suppress warnings about uninitialized variables
if (this->output_labels_) {
top_label = batch->label_.mutable_cpu_data();
}
for (int item_id = 0; item_id < batch_size; ++item_id) {
// get a blob
timer.Start();
Datum& datum = *(reader_.full().pop("Waiting for data"));
deque_time += timer.MicroSeconds();
// Apply data transformations (mirror, scale, crop...)
timer.Start();
const int offset_data = batch->data_.offset(item_id);
const int offset_label = batch->label_.offset(item_id);
this->transformed_data_.set_cpu_data(top_data + offset_data);
this->transformed_label_.set_cpu_data(top_label + offset_label);
this->data_transformer_->SegTransform(datum,
&(this->transformed_data_),
&(this->transformed_label_));
trans_time += timer.MicroSeconds();
reader_.free().push(const_cast<Datum*>(&datum));
}
timer.Stop();
batch_timer.Stop();
#ifdef BENCHMARK_DATA
LOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
LOG(INFO) << " Dequeue time: " << deque_time / 1000 << " ms.";
LOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
#endif
}
Example 4: CHECK
//......... some code omitted here .........
num_bboxes += transformed_anno_vec[g].annotation_size();
}
} else {
LOG(FATAL) << "Unknown annotation type.";
}
all_anno[item_id] = transformed_anno_vec;
} else {
this->data_transformer_->Transform(sampled_datum->datum(),
&(this->transformed_data_));
// Otherwise, store the label from datum.
CHECK(sampled_datum->datum().has_label()) << "Cannot find any label.";
top_label[item_id] = sampled_datum->datum().label();
}
}
else
{
this->data_transformer_->Transform(sampled_datum->datum(),
&(this->transformed_data_));
}
// clear memory=====================================================
if (has_sampled)
{
delete sampled_datum;
}
if (transform_param.has_expand_param())
{
delete expand_datum;
}
trans_time += timer.MicroSeconds();
reader_.free().push(const_cast<AnnotatedDatum*>(&anno_datum));
}
// Structure of the detection data (AnnotatedDatum):
// image data:
//   datum = anno_datum->mutable_datum()
//   datum->set_data();                       // all 8-bit pixel data
//   datum->set_channels(cv_img.channels());  // number of channels
//   datum->set_height(cv_img.rows);          // rows
//   datum->set_width(cv_img.cols);           // columns
//   datum->set_encoded(true);                // encoded flag
// labels:
//   anno_datum->mutable_annotation_group(g)->set_group_label(label)  // class label
//                                           ->add_annotation()->mutable_bbox()  // bounding-box data
// (a sketch of reading the resulting label layout back follows this example)
// Store "rich" annotation if needed.
if (this->output_labels_ && has_anno_type_) {
vector<int> label_shape(4);
if (anno_type_ == AnnotatedDatum_AnnotationType_BBOX) {
label_shape[0] = 1;
label_shape[1] = 1;
label_shape[3] = 8;
// no bounding boxes in this batch
if (num_bboxes == 0)
{
// Store all -1 in the label.
label_shape[2] = 1;
batch->label_.Reshape(label_shape);
caffe_set<Dtype>(8, -1, batch->label_.mutable_cpu_data());
}
else
{
// Reshape the label and store the annotation.
label_shape[2] = num_bboxes;  // one row per bounding box
batch->label_.Reshape(label_shape);
top_label = batch->label_.mutable_cpu_data();
int idx = 0;
for (int item_id = 0; item_id < batch_size; ++item_id) {
const vector<AnnotationGroup>& anno_vec = all_anno[item_id];
for (int g = 0; g < anno_vec.size(); ++g)
{
const AnnotationGroup& anno_group = anno_vec[g];
for (int a = 0; a < anno_group.annotation_size(); ++a)
{
const Annotation& anno = anno_group.annotation(a);
const NormalizedBBox& bbox = anno.bbox();    // bounding-box data
top_label[idx++] = item_id;                  // image index within the batch
top_label[idx++] = anno_group.group_label(); // class label id
top_label[idx++] = anno.instance_id();
top_label[idx++] = bbox.xmin();              // top-left corner
top_label[idx++] = bbox.ymin();
top_label[idx++] = bbox.xmax();              // bottom-right corner
top_label[idx++] = bbox.ymax();
top_label[idx++] = bbox.difficult();
}
}
}
}
}
else
{
LOG(FATAL) << "Unknown annotation type.";
}
}
timer.Stop();
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
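As a reading aid, the sketch below shows one way to decode the label blob this example fills, assuming the 1 x 1 x num_bboxes x 8 layout written by the loop above; the struct and function names are hypothetical, not part of Caffe. Note that when num_bboxes is 0 the example stores a single row of -1 instead, so a caller would check for that sentinel first.
// Hypothetical helper: decode the 1 x 1 x num_bboxes x 8 detection label
// blob written in the example above. Field order follows the writing loop:
// item_id, group_label, instance_id, xmin, ymin, xmax, ymax, difficult.
#include <vector>

struct BBoxRow {
  int item_id;                   // image index within the batch
  int label;                     // class (group) label
  int instance_id;               // instance id within the group
  float xmin, ymin, xmax, ymax;  // normalized box corners
  bool difficult;                // "difficult" flag
};

template <typename Dtype>
std::vector<BBoxRow> DecodeDetectionLabels(const Dtype* top_label,
                                           int num_bboxes) {
  std::vector<BBoxRow> rows;
  rows.reserve(num_bboxes);
  for (int i = 0; i < num_bboxes; ++i) {
    const Dtype* p = top_label + i * 8;  // one 8-value row per bounding box
    BBoxRow r;
    r.item_id     = static_cast<int>(p[0]);
    r.label       = static_cast<int>(p[1]);
    r.instance_id = static_cast<int>(p[2]);
    r.xmin = static_cast<float>(p[3]);
    r.ymin = static_cast<float>(p[4]);
    r.xmax = static_cast<float>(p[5]);
    r.ymax = static_cast<float>(p[6]);
    r.difficult = p[7] != Dtype(0);
    rows.push_back(r);
  }
  return rows;
}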
Example 5: CHECK
void MILDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
CPUTimer timer;
timer.Start();
CHECK(batch->data_.count());
//Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
//Dtype* top_label = this->prefetch_label_.mutable_cpu_data();
Dtype* top_data = batch->data_.mutable_cpu_data();
Dtype* top_label = batch->label_.mutable_cpu_data();
const int img_size = this->transform_param_.crop_size();
const int channels = this->layer_param_.mil_data_param().channels();
const int scale = this->transform_param_.scale();
const bool mirror = this->transform_param_.mirror();
const int images_per_batch = this->layer_param_.mil_data_param().images_per_batch();
const int n_classes = this->layer_param_.mil_data_param().n_classes();
const int num_scales = this->layer_param_.mil_data_param().num_scales();
const float scale_factor = this->layer_param_.mil_data_param().scale_factor();
// zero out batch
//caffe_set(this->prefetch_data_.count(), Dtype(0), top_data);
caffe_set(batch->data_.count(), Dtype(0), top_data);
int item_id;
for(int i_image = 0; i_image < images_per_batch; i_image++){
// Sample which image to read
unsigned int index = counter_; counter_ = counter_ + 1;
const unsigned int rand_index = this->PrefetchRand();
if(this->layer_param_.mil_data_param().randomize())
index = rand_index;
// LOG(INFO) << index % this->num_images_ << ", " << this->num_images_;
pair<string, string> p = this->image_database_[index % this->num_images_];
string im_name = p.first;
string full_im_name = p.second;
cv::Mat cv_img = cv::imread(full_im_name, CV_LOAD_IMAGE_COLOR);
if (!cv_img.data) {
LOG(ERROR) << "Could not open or find file " << full_im_name;
return;
}
//REVIEW ktran: do not hardcode dataset name (or its prefix "/labels-")
//REVIEW ktran: also do not use deep dataset name so that we don't have to modify the core caffe code
//(ref: https://github.com/BVLC/caffe/commit/a0787631a27ca6478f70341462aafdcf35dabb19)
hdf5_load_nd_dataset(this->label_file_id_, string("/labels-"+im_name).c_str(), 4, 4, &this->label_blob_);
const Dtype* label = label_blob_.mutable_cpu_data();
CHECK_EQ(label_blob_.width(), 1) << "Expected width of label to be 1." ;
CHECK_EQ(label_blob_.height(), n_classes) << "Expected height of label to be " << n_classes;
CHECK_EQ(label_blob_.channels(), 1) << "Expected channels of label to be 1." ;
CHECK_EQ(label_blob_.num(), 1) << "Expected num of label to be 1." ;
float img_size_i = img_size;
for(int i_scales = 0; i_scales < num_scales; i_scales++){
// Resize so that the image is img_size x img_size
item_id = i_image*num_scales + i_scales;
// LOG(INFO) << "MIL Data Layer: scale: " << (int) round(img_size_i);
cv::Mat cv_cropped_img = Transform_IDL(cv_img, static_cast<int>(round(img_size_i)), mirror);
for (int c = 0; c < channels; ++c) {
for (int h = 0; h < cv_cropped_img.rows; ++h) {
for (int w = 0; w < cv_cropped_img.cols; ++w) {
Dtype pixel =
static_cast<Dtype>(cv_cropped_img.at<cv::Vec3b>(h, w)[c]);
top_data[((item_id * channels + c) * img_size + h)
* img_size + w]
= (pixel - static_cast<Dtype>(mean_value_[c]))*scale;
}
}
}
img_size_i = std::max(static_cast<float>(1.), img_size_i*scale_factor);
}
for(int i_label = 0; i_label < n_classes; i_label++){
top_label[i_image*n_classes + i_label] =
label[i_label];
}
}
timer.Stop();
DLOG(INFO) << "Prefetch batch: " << timer.MilliSeconds() << " ms.";
}
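The pixel-copy loop in this example indexes a flat NCHW buffer; the small helper below isolates that offset arithmetic, assuming square images of side img_size as in the example. It is only an illustration of the formula, not part of the original layer.
// Offset of element (item, channel, row, col) in a flat NCHW buffer with
// square spatial dimensions img_size x img_size, matching the index
// expression used in the pixel-copy loop of Example 5.
inline int NCHWOffset(int item_id, int c, int h, int w,
                      int channels, int img_size) {
  return ((item_id * channels + c) * img_size + h) * img_size + w;
}

// Usage mirroring the loop above (mean_value and scale stand in for the
// layer's parameters):
//   top_data[NCHWOffset(item_id, c, h, w, channels, img_size)] =
//       (pixel - mean_value[c]) * scale;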
Example 6: CHECK
//......... some code omitted here .........
top_label = batch->label_.mutable_cpu_data();
// top_label = this->prefetch_label_.mutable_cpu_data();
}
Dtype* use_data=this->use_data_.mutable_cpu_data();
// LOG(INFO)<<" use_data[0]:"<<use_data[0];
if (use_data[0]==0.0){
// LOG(INFO)<<"visit in order";
for (int item_id = 0; item_id < batch_size; item_id++) {
Datum datum;
datum.ParseFromString(cursor_->value());
// Apply data transformations (mirror, scale, crop...)
// LOG(INFO)<<"jq enter data_layers"<< item_id;
int offset = batch->data_.offset(item_id);
// LOG(INFO)<<"jq enter data_layers";
this->transformed_data_.set_cpu_data(top_data + offset);
this->data_transformer_->Transform(datum, &(this->transformed_data_));
// Copy label.
if (this->output_labels_) {
top_label[item_id] = datum.label();
// std::cout<<" cursor_:"<<datum.label();
}
// use_data[item_id +5] = start;
// trans_time += timer.MicroSeconds();
cursor_->Next();
// start +=1.0;
// std::cout<<" output_labels_:"<<this->output_labels_;
if (!cursor_->valid()) {
DLOG(INFO) << "Restarting data prefetching from start.";
cursor_->SeekToFirst();
}
// reader_.free().push(const_cast<Datum*>(&datum));
}
}else if (use_data[0]!=0.0){
// forward-backward pass using semi-supervised training with pseudo labels
// use_data layout: 0: semi/supervised/unsupervised flag, 1: label kinds,
// 2: step over, 3: data num, 4: start index
// LOG(INFO)<<"visit in Key/value";
// LOG(INFO)<<"this->PREFETCH_COUNT:"<<this->PREFETCH_COUNT;
int step_over = batch_size+1;
// std::cout<<std::endl;
scoped_ptr<db::Transaction> txn(db_->NewTransaction());
// std::cout<<"key:";
int kCIFARImageNBytes=3072;
for (int item_id = 0; item_id < batch_size; item_id++) {
char str_buffer[kCIFARImageNBytes];
int id= static_cast<int>(use_data[item_id+ 1]);
// std::cout<<" "<<id<<":";
int length = snprintf(str_buffer, kCIFARImageNBytes, "%05d", id);
string value;
string str=string(str_buffer, length);
txn->Get(str, value);
Datum datum;
datum.ParseFromString(value);
int offset = batch->data_.offset(item_id);
// LOG(INFO)<<"jq enter data_layers";
this->transformed_data_.set_cpu_data(top_data + offset);
this->data_transformer_->Transform(datum, &(this->transformed_data_));
// std::cout<<" output_labels_:"<<this->output_labels_;
if (this->output_labels_) {
// top_label[item_id] = datum.label();
top_label[item_id] = use_data[item_id+ step_over];
// std::cout<<" KV:"<<datum.label();
// top_label[item_id]= static_cast<int>(use_data[item_id + batch_size +3]);
}
if( use_data[item_id+ step_over]!=(datum.label()%1000))
LOG(INFO)<<"image id:"<<id<<" not correctly fetch: "<<datum.label()
<<" vs "<<use_data[item_id+ step_over];
// std::cout<<top_label[item_id];
// std::cout<<" key:"<<id;
}
// std::cout<<std::endl;
// for (int item_id = 0; item_id < 50000; item_id++) {
// char str_buffer[kCIFARImageNBytes];
// // int id= static_cast<int>(use_data[item_id+ 1]);
// int length = snprintf(str_buffer, kCIFARImageNBytes, "%05d", item_id);
// string value;
// string str=string(str_buffer, length);
// txn->Get(str, value);
// // Datum datum;
// // datum.ParseFromString(value);
// // int offset = batch->data_.offset(item_id);
// // // LOG(INFO)<<"jq enter data_layers";
// // this->transformed_data_.set_cpu_data(top_data + offset);
// // this->data_transformer_->Transform(datum, &(this->transformed_data_));
// // if (this->output_labels_) {
// // top_label[item_id] = datum.label();
// // // top_label[item_id]= static_cast<int>(use_data[item_id + batch_size +3]);
// // }
// // std::cout<<" "<<item_id;
// }
// std::cout<<std::endl;
txn->Commit();
}
timer.Stop();
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example 7: CHECK
//......... some code omitted here .........
int offset = batch->data_.offset(item_id);
Dtype* top_data = batch->data_.mutable_cpu_data();
this->transformed_data_.set_cpu_data(top_data + offset);
vector<AnnotationGroup> transformed_anno_vec;
if (this->output_labels_) {
if (has_anno_type_) {
// Make sure all data have same annotation type.
CHECK(sampled_datum->has_type()) << "Some datum misses AnnotationType.";
if (anno_data_param.has_anno_type()) {
sampled_datum->set_type(anno_type_);
} else {
CHECK_EQ(anno_type_, sampled_datum->type()) <<
"Different AnnotationType.";
}
// Transform datum and annotation_group at the same time
transformed_anno_vec.clear();
this->data_transformer_->Transform(*sampled_datum,
&(this->transformed_data_),
&transformed_anno_vec);
if (anno_type_ == AnnotatedDatum_AnnotationType_BBOX) {
// Count the number of bboxes.
for (int g = 0; g < transformed_anno_vec.size(); ++g) {
num_bboxes += transformed_anno_vec[g].annotation_size();
}
} else {
LOG(FATAL) << "Unknown annotation type.";
}
all_anno[item_id] = transformed_anno_vec;
} else {
this->data_transformer_->Transform(sampled_datum->datum(),
&(this->transformed_data_));
// Otherwise, store the label from datum.
CHECK(sampled_datum->datum().has_label()) << "Cannot find any label.";
Dtype* top_label = batch->label_.mutable_cpu_data();
top_label[item_id] = sampled_datum->datum().label();
}
} else {
this->data_transformer_->Transform(sampled_datum->datum(),
&(this->transformed_data_));
}
// clear memory
if (has_sampled) {
delete sampled_datum;
}
if (transform_param.has_expand_param()) {
delete expand_datum;
}
trans_time += timer.MicroSeconds();
//reader_.free().push(const_cast<AnnotatedDatum*>(&anno_datum));
Next();
}
// Store "rich" annotation if needed.
if (this->output_labels_ && has_anno_type_) {
vector<int> label_shape(4);
if (anno_type_ == AnnotatedDatum_AnnotationType_BBOX) {
label_shape[0] = 1;
label_shape[1] = 1;
label_shape[3] = 8;
if (num_bboxes == 0) {
// Store all -1 in the label.
label_shape[2] = 1;
batch->label_.Reshape(label_shape);
caffe_set<Dtype>(8, -1, batch->label_.mutable_cpu_data());
} else {
// Reshape the label and store the annotation.
label_shape[2] = num_bboxes;
batch->label_.Reshape(label_shape);
Dtype* top_label = batch->label_.mutable_cpu_data();
int idx = 0;
for (int item_id = 0; item_id < batch_size; ++item_id) {
const vector<AnnotationGroup>& anno_vec = all_anno[item_id];
for (int g = 0; g < anno_vec.size(); ++g) {
const AnnotationGroup& anno_group = anno_vec[g];
for (int a = 0; a < anno_group.annotation_size(); ++a) {
const Annotation& anno = anno_group.annotation(a);
const NormalizedBBox& bbox = anno.bbox();
top_label[idx++] = item_id;
top_label[idx++] = anno_group.group_label();
top_label[idx++] = anno.instance_id();
top_label[idx++] = bbox.xmin();
top_label[idx++] = bbox.ymin();
top_label[idx++] = bbox.xmax();
top_label[idx++] = bbox.ymax();
top_label[idx++] = bbox.difficult();
}
}
}
}
} else {
LOG(FATAL) << "Unknown annotation type.";
}
}
timer.Stop();
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}