本文整理汇总了C++中CPUTimer::Start方法的典型用法代码示例。如果您正苦于以下问题:C++ CPUTimer::Start方法的具体用法?C++ CPUTimer::Start怎么用?C++ CPUTimer::Start使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类CPUTimer
的用法示例。
在下文中一共展示了CPUTimer::Start方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: CHECK
void MyDataLayer<Dtype>::load_batch(Batch<Dtype>* batch){
  // Fill one prefetch batch from the in-memory sample list, timing the
  // read and transform phases separately.
  CPUTimer total_timer;
  total_timer.Start();
  double elapsed_read = 0;   // microseconds spent fetching samples
  double elapsed_trans = 0;  // microseconds spent transforming samples
  CPUTimer section_timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  MyDataParameter my_data_param = this->layer_param_.my_data_param();
  // Batch size comes from the layer configuration.
  const int batch_size = my_data_param.batch_size();
  // Infer the blob shape from the first sample; reshaping per batch allows
  // single-sample batches of varying dimension.
  cv::Mat first_img = samples_[lines_id_].first;
  CHECK(first_img.data) << "Could not load "<<lines_id_<<" sample";
  vector<int> top_shape = this->data_transformer_->InferBlobShape(first_img);
  this->transformed_data_.Reshape(top_shape);
  // Widen the shape to the full batch.
  top_shape[0] = batch_size;
  batch->data_.Reshape(top_shape);
  Dtype* data_dst = batch->data_.mutable_cpu_data();
  Dtype* label_dst = batch->label_.mutable_cpu_data();
  int num_samples = samples_.size();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // Phase 1: fetch the sample.
    section_timer.Start();
    CHECK_GT(num_samples, lines_id_);
    cv::Mat sample = samples_[lines_id_].first;
    CHECK(sample.data) << "Could not load "<<lines_id_<<" sample";
    elapsed_read += section_timer.MicroSeconds();
    // Phase 2: transform it into this item's slice of the batch blob.
    section_timer.Start();
    int offset = batch->data_.offset(item_id);
    this->transformed_data_.set_cpu_data(data_dst + offset);
    this->data_transformer_->Transform(sample, &(this->transformed_data_));
    elapsed_trans += section_timer.MicroSeconds();
    label_dst[item_id] = samples_[lines_id_].second;
    // Advance to the next sample, wrapping (and optionally reshuffling)
    // when the end of the list is reached.
    if (++lines_id_ >= num_samples) {
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (my_data_param.shuffle()) {
        ShuffleImages();
      }
    }
  }
  total_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << total_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << " Read time: " << elapsed_read / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << elapsed_trans / 1000 << " ms.";
}
示例2: CHECK
// Prefetch-thread entry point: reads `batch_size` serialized Datum records
// from the database cursor, applies the configured transformations, and
// fills prefetch_data_ (and prefetch_label_ when labels are enabled).
// Per-phase timing is accumulated and reported via DLOG.
void DataLayer<Dtype>::InternalThreadEntry() {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;  // cumulative datum read/parse time, microseconds
double trans_time = 0;  // cumulative transform time, microseconds
CPUTimer timer;
CHECK(this->prefetch_data_.count());
CHECK(this->transformed_data_.count());
// Reshape according to the first datum of each batch;
// on single input batches this allows for inputs of varying dimension.
const int batch_size = this->layer_param_.data_param().batch_size();
// Parse the datum at the current cursor position only to infer its shape;
// the cursor is not advanced here, so the first loop iteration below
// re-parses this same record for actual use.
Datum datum;
datum.ParseFromString(cursor_->value());
// Use data_transformer to infer the expected blob shape from datum.
vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
this->transformed_data_.Reshape(top_shape);
// Reshape prefetch_data according to the batch_size.
top_shape[0] = batch_size;
this->prefetch_data_.Reshape(top_shape);
Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
Dtype* top_label = NULL; // suppress warnings about uninitialized variables
if (this->output_labels_) {
top_label = this->prefetch_label_.mutable_cpu_data();
}
// Start timing the first read phase; on later iterations the timer is
// restarted after the transform phase, so "read" also covers cursor Next().
timer.Start();
for (int item_id = 0; item_id < batch_size; ++item_id) {
// Read phase: parse the datum at the current cursor position.
Datum datum;
datum.ParseFromString(cursor_->value());
read_time += timer.MicroSeconds();
timer.Start();
// Transform phase: apply data transformations (mirror, scale, crop...)
// in place on this item's slice of the prefetch blob.
int offset = this->prefetch_data_.offset(item_id);
this->transformed_data_.set_cpu_data(top_data + offset);
this->data_transformer_->Transform(datum, &(this->transformed_data_));
// Copy label.
if (this->output_labels_) {
top_label[item_id] = datum.label();
}
trans_time += timer.MicroSeconds();
timer.Start();
// Advance the cursor; wrap around when the database is exhausted.
cursor_->Next();
if (!cursor_->valid()) {
DLOG(INFO) << "Restarting data prefetching from start.";
cursor_->SeekToFirst();
}
}
timer.Stop();
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
示例3: if
// Fills one prefetch batch of optical-flow data. Each of the `batch_size`
// items is expanded into `num_test_views_` views along the num axis; datums
// are pulled from the reader's full queue and recycled to its free queue.
void FlowDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;  // cumulative queue-wait/read time, microseconds
double trans_time = 0;  // cumulative transform time, microseconds
CPUTimer timer;
CHECK(batch->data_.count());
CHECK(this->transformed_data_.count());
// Reshape according to the first datum of each batch;
// on single input batches this allows for inputs of varying dimension.
const int batch_size = this->layer_param_.flow_data_param().batch_size();
// Peek (without popping) at the next datum just to infer the blob shape.
Datum& datum = *(reader_.full().peek());
// Use data_transformer to infer the expected blob shape from datum.
vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
// transformed_data_ holds all views of a single item.
top_shape[0] = num_test_views_;
this->transformed_data_.Reshape(top_shape);
// Reshape batch to batch_size items, each carrying num_test_views_ views.
top_shape[0] = batch_size * num_test_views_;
batch->data_.Reshape(top_shape);
Dtype* top_data = batch->data_.mutable_cpu_data();
Dtype* top_label = NULL; // suppress warnings about uninitialized variables
if (this->output_labels_) {
top_label = batch->label_.mutable_cpu_data();
}
for (int item_id = 0; item_id < batch_size; ++item_id) {
timer.Start();
// Block until a datum is available from the prefetch reader.
Datum& datum = *(reader_.full().pop("Waiting for flow data"));
read_time += timer.MicroSeconds();
// DLOG(INFO) << "number of data in full queue: " << reader_.full().size();
timer.Start();
// Apply data transformations (mirror, scale, crop...).
// Offset skips num_test_views_ slots per item.
int offset = batch->data_.offset(item_id * num_test_views_);
this->transformed_data_.set_cpu_data(top_data + offset);
// TRAIN: one augmented copy; TEST: num_test_views_ deterministic views.
// NOTE(review): for any other phase the slot is left untouched — confirm
// only TRAIN/TEST can reach this code.
if (this->phase_ == TRAIN)
this->data_transformer_->TransformVariedSizeDatum(datum, &(this->transformed_data_));
else if (this->phase_ == TEST)
this->data_transformer_->TransformVariedSizeTestDatum(datum, &(this->transformed_data_), num_test_views_);
// Copy label.
if (this->output_labels_) {
top_label[item_id] = datum.label();
}
trans_time += timer.MicroSeconds();
// Return the datum to the reader's free queue for reuse.
reader_.free().push(const_cast<Datum*>(&datum));
}
timer.Stop();
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
示例4: CHECK
void ImageDataLayer<Dtype>::InternalThreadEntry() {
  // Prefetch-thread entry point: reads `batch_size` images from the lines_
  // index list, transforms each, and fills prefetch_data_/prefetch_label_.
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;   // cumulative disk-read time, microseconds
  double trans_time = 0;  // cumulative transform time, microseconds
  CPUTimer timer;
  CHECK(this->prefetch_data_.count());
  CHECK(this->transformed_data_.count());
  Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
  Dtype* top_label = this->prefetch_label_.mutable_cpu_data();
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  const int batch_size = image_data_param.batch_size();
  const int new_height = image_data_param.new_height();
  const int new_width = image_data_param.new_width();
  const bool is_color = image_data_param.is_color();
  string root_folder = image_data_param.root_folder();
  const int lines_size = lines_.size();
  int num_skipped = 0;  // unreadable files skipped while filling this batch
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // Read phase: load the image from disk.
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
                                      new_height, new_width, is_color);
    if (!cv_img.data) {
      // BUG FIX: the original `continue` did not advance lines_id_, so one
      // unreadable file was retried for every remaining batch slot and those
      // slots were left with stale data. Skip past the bad entry and refill
      // the current slot with the next image instead.
      LOG(WARNING) << "Could not load " << root_folder + lines_[lines_id_].first;
      lines_id_ = (lines_id_ + 1) % lines_size;
      CHECK_LT(++num_skipped, lines_size) << "No loadable images in the list.";
      --item_id;  // retry this batch slot on the next iteration
      continue;
    }
    read_time += timer.MicroSeconds();
    timer.Start();
    // Transform phase: apply transformations (mirror, crop...) in place on
    // this item's slice of the prefetch blob.
    int offset = this->prefetch_data_.offset(item_id);
    this->transformed_data_.set_cpu_data(top_data + offset);
    this->data_transformer_.Transform(cv_img, &(this->transformed_data_));
    trans_time += timer.MicroSeconds();
    top_label[item_id] = lines_[lines_id_].second;
    // Advance to the next line, wrapping (and optionally reshuffling) at the
    // end of the list.
    lines_id_++;
    if (lines_id_ >= lines_size) {
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
示例5: CHECK
// Fills one prefetch batch for semantic segmentation: pops Datum records
// from the reader queue and writes both the image (data) and the per-pixel
// label map via DataTransformer::SegTransform.
void SegDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
CPUTimer batch_timer;
batch_timer.Start();
double deque_time = 0;  // cumulative queue-wait time, microseconds
double trans_time = 0;  // cumulative transform time, microseconds
CPUTimer timer;
CHECK(batch->data_.count());
CHECK(this->transformed_data_.count());
// Reshape on single input batches for inputs of varying dimension.
const int batch_size = this->layer_param_.data_param().batch_size();
Dtype* top_data = batch->data_.mutable_cpu_data();
Dtype* top_label = NULL; // suppress warnings about uninitialized variables
if (this->output_labels_) {
top_label = batch->label_.mutable_cpu_data();
}
// NOTE(review): if output_labels_ were false, top_label would stay NULL yet
// offset_label/set_cpu_data below still access batch->label_ — this layer
// appears to require labels; confirm output_labels_ is always true here.
for (int item_id = 0; item_id < batch_size; ++item_id) {
// Dequeue phase: block until a datum is available.
timer.Start();
Datum& datum = *(reader_.full().pop("Waiting for data"));
deque_time += timer.MicroSeconds();
// Transform phase: apply data transformations (mirror, scale, crop...).
timer.Start();
const int offset_data = batch->data_.offset(item_id);
const int offset_label = batch->label_.offset(item_id);
this->transformed_data_.set_cpu_data(top_data + offset_data);
this->transformed_label_.set_cpu_data(top_label + offset_label);
// Jointly transform image and label map so spatial augmentations match.
this->data_transformer_->SegTransform(datum,
&(this->transformed_data_),
&(this->transformed_label_));
trans_time += timer.MicroSeconds();
// Return the datum to the free queue for reuse.
reader_.free().push(const_cast<Datum*>(&datum));
}
timer.Stop();
batch_timer.Stop();
// Timing output is compiled in only when BENCHMARK_DATA is defined.
#ifdef BENCHMARK_DATA
LOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
LOG(INFO) << " Dequeue time: " << deque_time / 1000 << " ms.";
LOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
#endif
}
示例6: CHECK
void BinaryDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  // Fills one prefetch batch of raw binary blobs (one file per sample, read
  // straight into the batch blob) plus scalar labels from lines_.
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;  // cumulative file-read time, microseconds
  // FIX: removed `static int time_idx = 0;` — it was never used, and a
  // mutable function-local static is not thread-safe in a prefetch thread.
  CPUTimer timer;
  CHECK(batch->data_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  string root_folder = image_data_param.root_folder();
  const int batch_size = this->layer_param_.image_data_param().batch_size();
  // Shape is fixed at setup time for this layer.
  const vector<int> & top_shape = this->top_shape_;
  // Reshape batch according to the batch_size.
  batch->data_.Reshape(top_shape);
  Dtype* prefetch_data = batch->data_.mutable_cpu_data();
  Dtype* prefetch_label = batch->label_.mutable_cpu_data();
  const int lines_size = lines_.size();
  // Per-sample element count (channels * height * width).
  const int count = top_shape[1] * top_shape[2] * top_shape[3];
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // Read the binary blob directly into this item's slice of the batch.
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
    int offset = batch->data_.offset(item_id);
    int ret = ReadBinaryBlob(root_folder + lines_[lines_id_].first,
                             prefetch_data + offset, count);
    read_time += timer.MicroSeconds();
    CHECK(ret == 0) << "Could not load " << lines_[lines_id_].first;
    prefetch_label[item_id] = lines_[lines_id_].second;
    // Advance to the next line, wrapping (and optionally reshuffling) at
    // the end of the list.
    lines_id_++;
    if (lines_id_ >= lines_size) {
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
}
示例7: CHECK
void DataLstmTrainHistLayer<Dtype>::InternalThreadEntry() {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;
double trans_time = 0;
CPUTimer timer;
CHECK(this->prefetch_data_.count());
Datum datum;
Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
Dtype* top_label = this->prefetch_label_.mutable_cpu_data();
Dtype* top_hist = this->prefetch_hist_.mutable_cpu_data();
Dtype* top_marker = this->prefetch_marker_.mutable_cpu_data();
// datum scales
const int size = resize_height*resize_width*3;
const Dtype* mean = this->data_mean_.mutable_cpu_data();
string value;
const int kMaxKeyLength = 256;
char key_cstr[kMaxKeyLength];
int key;
const int sequence_size = this->layer_param_.data_lstm_train_hist_param().sequence_size();
const int ind_seq_num=this->layer_param_.data_lstm_train_hist_param().sequence_num();
const int interval=this->layer_param_.data_lstm_train_hist_param().interval();
int item_id;
for (int time_id = 0; time_id < sequence_size; ++time_id) {
for (int seq_id = 0; seq_id < ind_seq_num; ++seq_id) {
item_id=time_id*ind_seq_num+seq_id;
timer.Start();
// get a blob
key=buffer_key[seq_id]; // MUST be changed according to the size of the training set
snprintf(key_cstr, kMaxKeyLength, "%08d", key);
db_->Get(leveldb::ReadOptions(), string(key_cstr), &value);
datum.ParseFromString(value);
const string& data = datum.data();
read_time += timer.MicroSeconds();
timer.Start();
for (int j = 0; j < size; ++j) {
Dtype datum_element = static_cast<Dtype>(static_cast<uint8_t>(data[j]));
top_data[item_id * size + j] = (datum_element - mean[j]);
}
for (int j = 0; j < para_dim; ++j) {
top_label[item_id * para_dim + j] = datum.float_data(j);
}
top_marker[item_id] = datum.float_data(para_dim);
if (buffer_marker[seq_id] == 0) {
top_marker[item_id] = 0;
buffer_marker[seq_id] = 1;
}
//////////////////////////////////// for hist
if (top_marker[item_id] < 0.5) {
for (int j = 0; j < para_dim; ++j)
top_hist[item_id * para_dim + j] = 0;
} else {
if (time_id == 0) {
top_hist[item_id * para_dim + 0] = hist_blob[seq_id * para_dim + 0]/1.1+0.5;
top_hist[item_id * para_dim + 1] = hist_blob[seq_id * para_dim + 1]*0.17778+1.34445;
top_hist[item_id * para_dim + 2] = hist_blob[seq_id * para_dim + 2]*0.14545+0.39091;
top_hist[item_id * para_dim + 3] = hist_blob[seq_id * para_dim + 3]*0.17778-0.34445;
top_hist[item_id * para_dim + 4] = hist_blob[seq_id * para_dim + 4]/95.0+0.12;
top_hist[item_id * para_dim + 5] = hist_blob[seq_id * para_dim + 5]/95.0+0.12;
top_hist[item_id * para_dim + 6] = hist_blob[seq_id * para_dim + 6]*0.14545+1.48181;
top_hist[item_id * para_dim + 7] = hist_blob[seq_id * para_dim + 7]*0.16+0.98;
top_hist[item_id * para_dim + 8] = hist_blob[seq_id * para_dim + 8]*0.16+0.02;
top_hist[item_id * para_dim + 9] = hist_blob[seq_id * para_dim + 9]*0.14545-0.48181;
top_hist[item_id * para_dim + 10] = hist_blob[seq_id * para_dim + 10]/95.0+0.12;
top_hist[item_id * para_dim + 11] = hist_blob[seq_id * para_dim + 11]/95.0+0.12;
top_hist[item_id * para_dim + 12] = hist_blob[seq_id * para_dim + 12]/95.0+0.12;
top_hist[item_id * para_dim + 13] = hist_blob[seq_id * para_dim + 13]*0.6+0.2;
} else {
int pre_id=(time_id-1)*ind_seq_num+seq_id;
top_hist[item_id * para_dim + 0] = top_label[pre_id * para_dim + 0]/1.1+0.5;
top_hist[item_id * para_dim + 1] = top_label[pre_id * para_dim + 1]*0.17778+1.34445;
top_hist[item_id * para_dim + 2] = top_label[pre_id * para_dim + 2]*0.14545+0.39091;
top_hist[item_id * para_dim + 3] = top_label[pre_id * para_dim + 3]*0.17778-0.34445;
top_hist[item_id * para_dim + 4] = top_label[pre_id * para_dim + 4]/95.0+0.12;
top_hist[item_id * para_dim + 5] = top_label[pre_id * para_dim + 5]/95.0+0.12;
top_hist[item_id * para_dim + 6] = top_label[pre_id * para_dim + 6]*0.14545+1.48181;
top_hist[item_id * para_dim + 7] = top_label[pre_id * para_dim + 7]*0.16+0.98;
top_hist[item_id * para_dim + 8] = top_label[pre_id * para_dim + 8]*0.16+0.02;
top_hist[item_id * para_dim + 9] = top_label[pre_id * para_dim + 9]*0.14545-0.48181;
top_hist[item_id * para_dim + 10] = top_label[pre_id * para_dim + 10]/95.0+0.12;
top_hist[item_id * para_dim + 11] = top_label[pre_id * para_dim + 11]/95.0+0.12;
top_hist[item_id * para_dim + 12] = top_label[pre_id * para_dim + 12]/95.0+0.12;
top_hist[item_id * para_dim + 13] = top_label[pre_id * para_dim + 13]*0.6+0.2;
}
}
//////////////////////////////////// for hist
//.........这里部分代码省略.........
示例8: CHECK
// Fills one prefetch batch where each item consists of `num_images` images
// concatenated along the channel axis; the item label is the line's class id.
void MultiImageDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;  // cumulative shuffle/bookkeeping time, microseconds
double trans_time = 0;  // cumulative read+transform time, microseconds
CPUTimer timer;
CHECK(batch->data_.count());
CHECK(this->transformed_data_.count());
MultiImageDataParameter multi_image_data_param = this->layer_param_.multi_image_data_param();
const int batch_size = multi_image_data_param.batch_size();
const int new_height = multi_image_data_param.new_height();
const int new_width = multi_image_data_param.new_width();
const bool is_color = multi_image_data_param.is_color();
string root_folder = multi_image_data_param.root_folder();
const int num_images = this->layer_param_.multi_image_data_param().num_images();
// Reshape according to the first image of each batch;
// on single input batches this allows for inputs of varying dimension.
cv::Mat cv_img = ReadImageToCVMat(root_folder + *lines_[lines_id_].first.begin(),
new_height, new_width, is_color);
CHECK(cv_img.data) << "Could not load " << *lines_[lines_id_].first.begin();
// Use data_transformer to infer the expected blob shape from a cv_img.
vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
this->transformed_data_.Reshape(top_shape);
// One item stacks num_images images along the channel axis.
top_shape[1] *= num_images;
// Reshape batch according to the batch_size.
top_shape[0] = batch_size;
batch->data_.Reshape(top_shape);
Dtype* prefetch_data = batch->data_.mutable_cpu_data();
Dtype* prefetch_label = batch->label_.mutable_cpu_data();
const int lines_size = lines_.size();
for (int item_id = 0; item_id < batch_size; ++item_id) {
timer.Start();
CHECK_GT(lines_size, lines_id_);
// Optionally shuffle the order of this line's image list before reading.
if (this->layer_param_.multi_image_data_param().shuffle_images() == true) {
caffe::rng_t* prefetch_rng =
static_cast<caffe::rng_t*>(prefetch_rng_->generator());
shuffle(lines_[lines_id_].first.begin(), lines_[lines_id_].first.end(), prefetch_rng);
}
read_time += timer.MicroSeconds();
// NOTE(review): the timer restarts here, so the disk reads below are
// accounted under trans_time, unlike sibling layers — confirm intended.
timer.Start();
for (int image_index = 0; image_index < num_images; image_index++) {
cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first[image_index], new_height, new_width, is_color);
CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first[image_index];
// Apply transformations (mirror, crop...) to the image; each image lands
// in its own channel slice of this item.
int offset = batch->data_.offset(item_id, image_index * cv_img.channels());
this->transformed_data_.set_cpu_data(prefetch_data + offset);
this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
}
trans_time += timer.MicroSeconds();
prefetch_label[item_id] = lines_[lines_id_].second;
// Advance to the next line, wrapping (and optionally reshuffling) at the
// end of the list.
lines_id_++;
if (lines_id_ >= lines_size) {
DLOG(INFO) << "Restarting data prefetching from start.";
lines_id_ = 0;
if (this->layer_param_.multi_image_data_param().shuffle()) {
ShuffleImages();
}
}
}
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
示例9: CHECK
void MILDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  // Fills one MIL batch: `images_per_batch` images, each written at
  // `num_scales` progressively rescaled sizes (shrunk by scale_factor per
  // step), plus an n_classes-long label vector per image loaded from the
  // HDF5 label file.
  CPUTimer timer;
  timer.Start();
  CHECK(batch->data_.count());
  Dtype* top_data = batch->data_.mutable_cpu_data();
  Dtype* top_label = batch->label_.mutable_cpu_data();
  const int img_size = this->transform_param_.crop_size();
  const int channels = this->layer_param_.mil_data_param().channels();
  // BUG FIX: TransformationParameter.scale is a float; the original
  // `const int scale` truncated fractional scales (e.g. 1/256) to 0, which
  // zeroed every pixel written below. Keep full precision in Dtype.
  const Dtype scale = this->transform_param_.scale();
  const bool mirror = this->transform_param_.mirror();
  const int images_per_batch = this->layer_param_.mil_data_param().images_per_batch();
  const int n_classes = this->layer_param_.mil_data_param().n_classes();
  const int num_scales = this->layer_param_.mil_data_param().num_scales();
  const float scale_factor = this->layer_param_.mil_data_param().scale_factor();
  // Zero the whole batch first: each scale pass may write a smaller region.
  caffe_set(batch->data_.count(), Dtype(0), top_data);
  int item_id;
  for (int i_image = 0; i_image < images_per_batch; i_image++) {
    // Sample which image to read (sequential by default, random if enabled).
    unsigned int index = counter_; counter_ = counter_ + 1;
    const unsigned int rand_index = this->PrefetchRand();
    if (this->layer_param_.mil_data_param().randomize())
      index = rand_index;
    pair<string, string> p = this->image_database_[index % this->num_images_];
    string im_name = p.first;
    string full_im_name = p.second;
    cv::Mat cv_img = cv::imread(full_im_name, CV_LOAD_IMAGE_COLOR);
    if (!cv_img.data) {
      // Abort the batch on a missing file; the remaining slots stay zero.
      LOG(ERROR) << "Could not open or find file " << full_im_name;
      return;
    }
    // REVIEW ktran: do not hardcode dataset name (or its prefix "/labels-")
    // REVIEW ktran: also do not use deep dataset name so that we don't have
    // to modify the core caffe code
    // (ref: https://github.com/BVLC/caffe/commit/a0787631a27ca6478f70341462aafdcf35dabb19)
    hdf5_load_nd_dataset(this->label_file_id_, string("/labels-"+im_name).c_str(), 4, 4, &this->label_blob_);
    const Dtype* label = label_blob_.mutable_cpu_data();
    CHECK_EQ(label_blob_.width(), 1) << "Expected width of label to be 1." ;
    CHECK_EQ(label_blob_.height(), n_classes) << "Expected height of label to be " << n_classes;
    CHECK_EQ(label_blob_.channels(), 1) << "Expected channels of label to be 1." ;
    CHECK_EQ(label_blob_.num(), 1) << "Expected num of label to be 1." ;
    float img_size_i = img_size;
    for (int i_scales = 0; i_scales < num_scales; i_scales++) {
      // Resize such that the image is of size img_size_i x img_size_i.
      item_id = i_image*num_scales + i_scales;
      cv::Mat cv_cropped_img = Transform_IDL(cv_img, static_cast<int>(round(img_size_i)), mirror);
      // HWC (OpenCV) -> CHW (Caffe), subtracting the per-channel mean and
      // applying the global scale.
      for (int c = 0; c < channels; ++c) {
        for (int h = 0; h < cv_cropped_img.rows; ++h) {
          for (int w = 0; w < cv_cropped_img.cols; ++w) {
            Dtype pixel =
                static_cast<Dtype>(cv_cropped_img.at<cv::Vec3b>(h, w)[c]);
            top_data[((item_id * channels + c) * img_size + h)
                     * img_size + w]
                = (pixel - static_cast<Dtype>(mean_value_[c]))*scale;
          }
        }
      }
      // Shrink for the next scale pass, never below 1 pixel.
      img_size_i = std::max(static_cast<float>(1.), img_size_i*scale_factor);
    }
    // Copy this image's label vector.
    for (int i_label = 0; i_label < n_classes; i_label++) {
      top_label[i_image*n_classes + i_label] =
          label[i_label];
    }
  }
  timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << timer.MilliSeconds() << " ms.";
}
示例10: CHECK
// Prefetch-thread entry point for a DB-backed data layer variant that
// supports encoded (compressed image) datums and multi-valued labels.
void DataLayer<Dtype>::InternalThreadEntry() {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;  // cumulative read/parse/decode time, microseconds
double trans_time = 0;  // cumulative transform time, microseconds
CPUTimer timer;
CHECK(this->prefetch_data_.count());
CHECK(this->transformed_data_.count());
// Reshape on single input batches for inputs of varying dimension.
const int batch_size = this->layer_param_.data_param().batch_size();
const int crop_size = this->layer_param_.transform_param().crop_size();
// Only a batch of one uncropped datum may change shape between batches.
if (batch_size == 1 && crop_size == 0) {
Datum datum;
datum.ParseFromString(cursor_->value());
this->prefetch_data_.Reshape(1, datum.channels(),
datum.height(), datum.width());
this->transformed_data_.Reshape(1, datum.channels(),
datum.height(), datum.width());
}
Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
Dtype* top_label = NULL; // suppress warnings about uninitialized variables
if (this->output_labels_) {
top_label = this->prefetch_label_.mutable_cpu_data();
}
bool force_color = this->layer_param_.data_param().force_encoded_color();
for (int item_id = 0; item_id < batch_size; ++item_id) {
timer.Start();
// Read phase: parse the datum at the current cursor position.
Datum datum;
datum.ParseFromString(cursor_->value());
// Encoded datums carry a compressed image payload that must be decoded.
cv::Mat cv_img;
if (datum.encoded()) {
if (force_color) {
cv_img = DecodeDatumToCVMat(datum, true);
} else {
cv_img = DecodeDatumToCVMatNative(datum);
}
if (cv_img.channels() != this->transformed_data_.channels()) {
LOG(WARNING) << "Your dataset contains encoded images with mixed "
<< "channel sizes. Consider adding a 'force_color' flag to the "
<< "model definition, or rebuild your dataset using "
<< "convert_imageset.";
}
}
read_time += timer.MicroSeconds();
timer.Start();
// Transform phase: apply data transformations (mirror, scale, crop...).
int offset = this->prefetch_data_.offset(item_id);
this->transformed_data_.set_cpu_data(top_data + offset);
if (datum.encoded()) {
this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
} else {
this->data_transformer_->Transform(datum, &(this->transformed_data_));
}
// Copy all labels of this datum (multi-label support).
// NOTE(review): the flat offset assumes every datum has the same
// label_size; differing sizes would misalign prefetch_label_ — confirm.
if (this->output_labels_) {
for (int label_i = 0; label_i < datum.label_size(); ++label_i){
top_label[item_id * datum.label_size() + label_i] = datum.label(label_i);
}
//top_label[item_id] = datum.label();
}
trans_time += timer.MicroSeconds();
// Advance the cursor; wrap around when the database is exhausted.
cursor_->Next();
if (!cursor_->valid()) {
DLOG(INFO) << "Restarting data prefetching from start.";
cursor_->SeekToFirst();
}
}
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
示例11: CHECK
// Prefetch-thread entry point: loads RGB images into prefetch_data_ and
// their corresponding depth maps (read by ReadDepthToArray) into
// prefetch_label_.
void DepthDataLayer<Dtype>::InternalThreadEntry() {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;  // cumulative image-read time, microseconds
double trans_time = 0;  // cumulative transform time, microseconds
CPUTimer timer;
CHECK(this->prefetch_data_.count());
CHECK(this->transformed_data_.count());
DepthDataParameter depth_data_param = this->layer_param_.depth_data_param();
const int batch_size = depth_data_param.batch_size();
const int new_height = depth_data_param.new_height();
const int new_width = depth_data_param.new_width();
const bool is_color = depth_data_param.is_color();
string root_folder = depth_data_param.root_folder();
// Reshape according to the first image of each batch;
// on single input batches this allows for inputs of varying dimension.
cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
new_height, new_width, is_color);
CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
// Use data_transformer to infer the expected blob shape from a cv_img.
vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
this->transformed_data_.Reshape(top_shape);
// Reshape prefetch_data according to the batch_size.
top_shape[0] = batch_size;
this->prefetch_data_.Reshape(top_shape);
Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data();
Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data();
const int lines_size = lines_.size();
for (int item_id = 0; item_id < batch_size; ++item_id) {
// Read phase: load the image from disk.
timer.Start();
CHECK_GT(lines_size, lines_id_);
cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
new_height, new_width, is_color);
CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
read_time += timer.MicroSeconds();
timer.Start();
// Transform phase: write into this item's slice of the prefetch blob.
int offset = this->prefetch_data_.offset(item_id);
this->transformed_data_.set_cpu_data(prefetch_data + offset);
this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
trans_time += timer.MicroSeconds();
// Read the depth map for this sample into the label blob.
// NOTE(review): the depth resolution is hardcoded to 74*74 floats, and the
// raw memcpy assumes Dtype == float and that prefetch_label_'s per-item
// size is exactly 74*74 elements — otherwise this over/under-writes;
// confirm against the layer's setup code.
float depths[74*74];
ReadDepthToArray(lines_[lines_id_].second, depths);
int depth_offset = this->prefetch_label_.offset(item_id);
memcpy(&prefetch_label[depth_offset], &depths[0], sizeof(depths));
// Advance to the next line, wrapping at the end of the list.
lines_id_++;
if (lines_id_ >= lines_size) {
DLOG(INFO) << "Restarting data prefetching from start.";
lines_id_ = 0;
// NOTE(review): unlike sibling layers, shuffling here is unconditional
// (no shuffle flag is consulted) — confirm that is intended.
ShuffleImages();
}
}
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
示例12: CHECK
// Fills one prefetch batch of images listed in lines_, plus scalar labels.
// (Original Chinese comments translated to English.)
void ImageDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;  // cumulative disk-read time, microseconds
double trans_time = 0;  // cumulative transform time, microseconds
CPUTimer timer;
CHECK(batch->data_.count());
CHECK(this->transformed_data_.count());
// Fetch the layer parameters; see the ImageDataParameter definition.
ImageDataParameter image_data_param = this->layer_param_.image_data_param();
const int batch_size = image_data_param.batch_size();
const int new_height = image_data_param.new_height();
const int new_width = image_data_param.new_width();
const bool is_color = image_data_param.is_color();
string root_folder = image_data_param.root_folder();
// Reshape according to the first image of each batch;
// on single input batches this allows for inputs of varying dimension.
// Read the current first image and use it to configure blob shapes.
cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
new_height, new_width, is_color);
CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
// Use data_transformer to infer the expected blob shape from a cv_img.
vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
// Shape transformed_data_ to hold a single transformed image.
this->transformed_data_.Reshape(top_shape);
// Reshape batch according to the batch_size.
top_shape[0] = batch_size;
batch->data_.Reshape(top_shape);
Dtype* prefetch_data = batch->data_.mutable_cpu_data();
Dtype* prefetch_label = batch->label_.mutable_cpu_data();
// Read a batch of images and preprocess each one.
const int lines_size = lines_.size();
for (int item_id = 0; item_id < batch_size; ++item_id) {
// Read phase: load the image from disk.
timer.Start();
CHECK_GT(lines_size, lines_id_);
cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
new_height, new_width, is_color);
CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
read_time += timer.MicroSeconds();
timer.Start();
// Apply transformations (mirror, crop...) to the image:
// compute this item's offset into the batch blob,
int offset = batch->data_.offset(item_id);
// point transformed_data_ at that slice,
this->transformed_data_.set_cpu_data(prefetch_data + offset);
// and run the transformer in place.
this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
trans_time += timer.MicroSeconds();// accumulate preprocessing time
// Copy the class label into prefetch_label.
prefetch_label[item_id] = lines_[lines_id_].second;
// Advance to the next image.
lines_id_++;
// Wrap around at the end of the image list.
if (lines_id_ >= lines_size) {
// We have reached the end. Restart from the first.
DLOG(INFO) << "Restarting data prefetching from start.";
lines_id_ = 0;
// Optionally re-shuffle the image order.
if (this->layer_param_.image_data_param().shuffle()) {
ShuffleImages();
}
}
}
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
// Total preprocessing time.
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
示例13: CHECK
// Fills one prefetch batch for multi-label regression: each line supplies
// label_num_ target values (each divided by scale_), plus an optional
// per-label mask blob that down-weights zero-valued targets.
void MultiLabelImageDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;  // cumulative disk-read time, microseconds
double trans_time = 0;  // cumulative transform time, microseconds
CPUTimer timer;
CHECK(batch->data_.count());
CHECK(this->transformed_data_.count());
ImageDataParameter image_data_param = this->layer_param_.image_data_param();
const int batch_size = image_data_param.batch_size();
const int new_height = image_data_param.new_height();
const int new_width = image_data_param.new_width();
const bool is_color = image_data_param.is_color();
string root_folder = image_data_param.root_folder();
// Reshape according to the first image of each batch;
// on single input batches this allows for inputs of varying dimension.
cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
new_height, new_width, is_color);
CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
// Use data_transformer to infer the expected blob shape from a cv_img.
vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
this->transformed_data_.Reshape(top_shape);
// Reshape batch according to the batch_size.
top_shape[0] = batch_size;
batch->data_.Reshape(top_shape);
Dtype* prefetch_data = batch->data_.mutable_cpu_data();
Dtype* prefetch_label = batch->label_.mutable_cpu_data();
Dtype* prefetch_mask = batch->mask_.mutable_cpu_data();
const int lines_size = lines_.size();
for (int item_id = 0; item_id < batch_size; ++item_id) {
// Read phase: load the image from disk.
timer.Start();
CHECK_GT(lines_size, lines_id_);
cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
new_height, new_width, is_color);
CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
read_time += timer.MicroSeconds();
timer.Start();
// Apply transformations (mirror, crop...) to the image
int offset = batch->data_.offset(item_id);
this->transformed_data_.set_cpu_data(prefetch_data + offset);
this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
trans_time += timer.MicroSeconds();
// Copy the label_num_ regression targets, normalized by scale_.
for(int label_ind = 0; label_ind < label_num_; ++ label_ind){
int offset = batch->label_.offset(item_id,label_ind);
prefetch_label[offset] = lines_[lines_id_].second[label_ind] / scale_;
// prefetch_mask[offset] = static_cast<int>(prefetch_label[offset]) != 0 ? prefetch_label[offset] / 100.0 : Dtype(1.) ;
// Mask: non-zero labels are always kept (mask = 1); zero-valued labels
// are kept with ~10% probability (caffe_rng_rand() % 10 < 1) to
// down-weight the dominant zero targets.
if (need_output_mask_){
if (static_cast<int>(prefetch_label[offset]) != 0){
prefetch_mask[offset] = 1.;
}else if (caffe_rng_rand() % 10 < 1){
prefetch_mask[offset] = 1.;
}else{
prefetch_mask[offset] = 0.;
}
}
// LOG(INFO) << " aimed labels " << prefetch_label[offset]<< " AIMed masks: " << prefetch_mask[offset];
}
// Advance to the next line, wrapping (and optionally reshuffling) at the
// end of the list.
lines_id_++;
if (lines_id_ >= lines_size) {
// We have reached the end. Restart from the first.
DLOG(INFO) << "Restarting data prefetching from start.";
lines_id_ = 0;
if (this->layer_param_.image_data_param().shuffle()) {
ShuffleImages();
}
}
}
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
示例14: CHECK
void FlowDataLayer<Dtype>::InternalThreadEntry() {
// Prefetch-thread entry: fills prefetch_data_ with stacks of decompressed
// optical-flow fields and prefetch_label_ with the matching labels.
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;        // cumulative flow-image read time (microseconds)
double decompress_time = 0;  // cumulative decompress time (microseconds)
double trans_time = 0;       // cumulative transform time (microseconds)
CPUTimer timer;
CHECK(this->prefetch_data_.count());
CHECK(this->transformed_data_.count());
FlowDataParameter flow_data_param = this->layer_param_.flow_data_param();
const int batch_size = flow_data_param.batch_size();
const int stack_size = flow_data_param.stack_size();
const int height = flow_field_->height();
const int width = flow_field_->width();
const int data_dim = height * width * 2;  // one flow field: 2 channels (dx, dy)
Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data();
Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data();
Dtype* flow_stack_data = flow_stack_->mutable_cpu_data();
// datum scales
const int lines_size = lines_.size();
for (int item_id = 0; item_id < batch_size; ++item_id) {
CHECK_GT(lines_size, lines_id_ + stack_size - 1);
// Takes a step of random size (whole stacks) to decorrelate samples.
if (flow_data_param.rand_step()) {
unsigned int skip = caffe_rng_rand() % flow_data_param.rand_step();
lines_id_ += (skip * stack_size);
lines_id_ = lines_id_ % lines_size;
}
prefetch_label[item_id] = lines_[lines_id_].second;
for (int flow_id = 0; flow_id < stack_size; ++flow_id) {
// reads a compressed flow field.
timer.Start();
cv::Mat cv_img = ReadImageToCVMat(lines_[lines_id_].first,
height, width, true);
CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
read_time += timer.MicroSeconds();
// Decompress the flow into its slot within the stack blob.
timer.Start();
flow_field_->set_cpu_data(flow_stack_data + data_dim * flow_id);
Decompress(cv_img, flow_field_.get());
decompress_time += timer.MicroSeconds();
lines_id_++;
}
// Apply transformations (mirror, crop...) to the flow stack.
// BUGFIX: restart the timer here so trans_time measures only the transform;
// previously it also included everything since the last decompress Start().
timer.Start();
int offset = this->prefetch_data_.offset(item_id);
this->transformed_data_.set_cpu_data(prefetch_data + offset);
this->data_transformer_->Transform(flow_stack_.get(),
&(this->transformed_data_));
trans_time += timer.MicroSeconds();
// go to the next iter
if (lines_id_ >= lines_size) {
// We have reached the end. Restart from the first.
DLOG(INFO) << "Restarting data prefetching from start.";
lines_id_ = 0;
}
}
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << " Decompress time: " << decompress_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
示例15: CHECK
void ImageLabelmapDataLayer<Dtype>::load_batch(LabelmapBatch<Dtype>* batch) {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;
double trans_time = 0;
CPUTimer timer;
CHECK(batch->data_.count());
CHECK(batch->labelmap_.count());
CHECK(this->transformed_data_.count());
CHECK(this->transformed_labelmap_.count());
ImageDataParameter image_data_param = this->layer_param_.image_data_param();
const int batch_size = image_data_param.batch_size();
const int new_height = image_data_param.new_height();
const int new_width = image_data_param.new_width();
const bool is_color = image_data_param.is_color();
string root_folder = image_data_param.root_folder();
// Reshape according to the first image of each batch
// on single input batches allows for inputs of varying dimension.
cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
new_height, new_width, is_color);
cv::Mat cv_gt = ReadImageToCVMat(root_folder + lines_[lines_id_].second,
new_height, new_width, 0);
CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
// Use data_transformer to infer the expected blob shape from a cv_img.
vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
vector<int> top_shape_labelmap = this->data_transformer_->InferBlobShape(cv_gt);
this->transformed_data_.Reshape(top_shape);
this->transformed_labelmap_.Reshape(top_shape_labelmap);
// Reshape prefetch_data and top[0] according to the batch_size.
top_shape[0] = batch_size;
top_shape_labelmap[0] = batch_size;
batch->data_.Reshape(top_shape);
batch->labelmap_.Reshape(top_shape_labelmap);
Dtype* prefetch_data = batch->data_.mutable_cpu_data();
Dtype* prefetch_labelmap = batch->labelmap_.mutable_cpu_data();
// datum scales
const int lines_size = lines_.size();
for (int item_id = 0; item_id < batch_size; ++item_id) {
// get a blob
timer.Start();
CHECK_GT(lines_size, lines_id_);
cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
0, 0, is_color);
cv::Mat cv_gt = ReadImageToCVMat(root_folder + lines_[lines_id_].second,
0, 0, 0);
CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
const int height = cv_img.rows;
const int width = cv_img.cols;
const int gt_channels = cv_gt.channels();
const int gt_height = cv_gt.rows;
const int gt_width = cv_gt.cols;
CHECK((height == gt_height) && (width == gt_width)) << "GT image size should be equal to true image size";
CHECK(gt_channels == 1) << "GT image channel number should be 1";
if (new_height > 0 && new_width > 0) {
cv::resize(cv_img, cv_img, cv::Size(new_width, new_height));
cv::resize(cv_gt, cv_gt, cv::Size(new_width, new_height), 0, 0, cv::INTER_LINEAR);
}
if (!cv_img.data || !cv_gt.data) {
continue;
}
read_time += timer.MicroSeconds();
timer.Start();
// Apply transformations (mirror, crop...) to the image
int offset = batch->data_.offset(item_id);
int offset_gt = batch->labelmap_.offset(item_id);
//CHECK(offset == offset_gt) << "fetching should be synchronized";
this->transformed_data_.set_cpu_data(prefetch_data + offset);
this->transformed_labelmap_.set_cpu_data(prefetch_labelmap + offset_gt);
std::pair<int, int> hw_off = this->data_transformer_->LocTransform(cv_img, &(this->transformed_data_));
cv::Mat encoded_gt;
//regression
encoded_gt = cv_gt/255;
//[***Cautions***]
//One small trick leveraging opencv roundoff feature for **consensus sampling** in Holistically-Nested Edge Detection paper.
//For general binary edge maps this is okay
//For 5-subject aggregated edge maps (BSDS), this will abandon weak edge points labeled by only two or less labelers.
this->data_transformer_->LabelmapTransform(encoded_gt, &(this->transformed_labelmap_), hw_off);
trans_time += timer.MicroSeconds();
// go to the next iter
lines_id_++;
if (lines_id_ >= lines_size) {
// We have reached the end. Restart from the first.
DLOG(INFO) << "Restarting data prefetching from start.";
lines_id_ = 0;
//.........这里部分代码省略.........