This article collects typical usage examples of the C++ method Datum::float_data. If you have been wondering what Datum::float_data does, how to call it, or where to find working examples, the curated samples below should help. You can also explore the Datum class itself for more context on where this method lives.
The following presents 15 code examples of Datum::float_data, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C++ code samples.
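Before the examples, here is a minimal sketch of the API involved. Datum is a protobuf message (defined in caffe.proto), so float_data is a repeated float field with the usual generated accessors: float_data(i), float_data_size(), add_float_data(), mutable_float_data(), and clear_float_data(). The header path below assumes the standard Caffe layout; adjust it to your build.
#include <algorithm>
#include "caffe/proto/caffe.pb.h"  // generated from caffe.proto

// Round-trip a few float features through a Datum (minimal sketch).
void FloatDataSketch() {
  caffe::Datum datum;
  datum.set_channels(1);
  datum.set_height(1);
  datum.set_width(4);
  for (int i = 0; i < 4; ++i) {
    datum.add_float_data(0.5f * i);  // append to the repeated float field
  }
  // Convention used throughout the examples below: a Datum carries its payload
  // either as raw bytes in data() or as floats in float_data(), so the logical
  // element count is the larger of the two sizes.
  const int count = std::max<int>(datum.data().size(), datum.float_data_size());
  for (int i = 0; i < count; ++i) {
    const float v = datum.float_data(i);  // indexed read
    (void)v;  // silence unused-variable warnings in this sketch
  }
}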
Example 1: DLOG
void DataReader::Body::read_one(db::Cursor* cursor, db::Transaction* dblt, QueuePair* qp) {
Datum* datum = qp->free_.pop();
// TODO deserialize in-place instead of copy?
datum->ParseFromString(cursor->value());
if (dblt != NULL) {
string labels;
CHECK_EQ(dblt->Get(cursor->key(), labels), 0);
Datum labelDatum;
labelDatum.ParseFromString(labels);
// datum->MergeFrom(labelDatum);
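// Concatenate the label Datum onto the image Datum: the channel counts add up,
// and both the float features and the raw byte payload are appended.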
datum->set_channels(datum->channels() + labelDatum.channels());
datum->mutable_float_data()->MergeFrom(labelDatum.float_data());
datum->mutable_data()->append(labelDatum.data());
}
qp->full_.push(datum);
// go to the next iter
cursor->Next();
if (!cursor->valid()) {
DLOG(INFO) << "Restarting data prefetching from start.";
cursor->SeekToFirst();
}
}
Example 2: assert
bool MostCV::LevelDBReader::GetNextEntry(string &key, vector<double> &retVec, int &label) {
if (!database_iter_->Valid())
return false;
Datum datum;
datum.clear_float_data();
datum.clear_data();
datum.ParseFromString(database_iter_->value().ToString());
key = database_iter_->key().ToString();
label = datum.label();
int expected_data_size = std::max<int>(datum.data().size(), datum.float_data_size());
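// A Datum stores its payload either as raw bytes in data() or as floats in
// float_data(); whichever field is populated determines the expected size.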
const int datum_volume_size = datum.channels() * datum.height() * datum.width();
if (expected_data_size != datum_volume_size) {
cout << "Something wrong in saved data.";
assert(false);
}
retVec.resize(datum_volume_size);
const string& data = datum.data();
if (data.size() != 0) {
// Data stored in string, e.g. just pixel values of 196608 = 256 * 256 * 3
for (int i = 0; i < datum_volume_size; ++i)
retVec[i] = data[i];
} else {
// Data stored in real feature vector such as 4096 from feature extraction
for (int i = 0; i < datum_volume_size; ++i)
retVec[i] = datum.float_data(i);
}
database_iter_->Next();
++record_idx_;
return true;
}
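For context, a consumption loop for this reader might look like the sketch below. Only GetNextEntry is shown above, so the constructor and the database path are assumptions.
// Hypothetical driver for LevelDBReader; the constructor is an assumption.
MostCV::LevelDBReader reader("/path/to/features_leveldb");
string key;
vector<double> features;
int label = -1;
while (reader.GetNextEntry(key, features, label)) {
  // features holds either raw pixel bytes widened to double, or real-valued
  // features such as a 4096-dim vector from feature extraction.
  cout << key << " label=" << label << " dim=" << features.size() << "\n";
}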
Example 3: LOG
void MyImageDataLayer<Dtype>::fetchData() {
Datum datum;
CHECK(prefetch_data_.count());
Dtype* top_data = prefetch_data_.mutable_cpu_data();
Dtype* top_label = prefetch_label_.mutable_cpu_data();
ImageDataParameter image_data_param = this->layer_param_.image_data_param();
const Dtype scale = image_data_param.scale();  // image_data_layer parameters
const int batch_size = 1;  // instead of image_data_param.batch_size(); we only need a single image here
const int crop_size = image_data_param.crop_size();
const bool mirror = image_data_param.mirror();
const int new_height = image_data_param.new_height();
const int new_width = image_data_param.new_width();
if (mirror && crop_size == 0) {
LOG(FATAL) << "Current implementation requires mirror and crop_size to be "
<< "set at the same time.";
}
// datum scales
const int channels = datum_channels_;
const int height = datum_height_;
const int width = datum_width_;
const int size = datum_size_;
const int lines_size = lines_.size();
const Dtype* mean = data_mean_.cpu_data();
for (int item_id = 0; item_id < batch_size; ++item_id) {  // read one image
// get a blob
CHECK_GT(lines_size, lines_id_);
if (!ReadImageToDatum(lines_[lines_id_].first,
lines_[lines_id_].second,
new_height, new_width, &datum)) {
continue;
}
const string& data = datum.data();
if (crop_size) {
CHECK(data.size()) << "Image cropping only support uint8 data";
int h_off, w_off;
// We only do random crop when we do training.
h_off = (height - crop_size) / 2;
w_off = (width - crop_size) / 2;
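// e.g., for a 256x256 input with crop_size 227, h_off = w_off = (256 - 227) / 2 = 14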
// Normal copy: read the cropped image data into top_data
for (int c = 0; c < channels; ++c) {
for (int h = 0; h < crop_size; ++h) {
for (int w = 0; w < crop_size; ++w) {
int top_index = ((item_id * channels + c) * crop_size + h)
* crop_size + w;
int data_index = (c * height + h + h_off) * width + w + w_off;
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
top_data[top_index] = (datum_element - mean[data_index]) * scale;
}
}
}
} else {
// Just copy the whole image data into top_data
if (data.size()) {
for (int j = 0; j < size; ++j) {
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[j]));
top_data[item_id * size + j] = (datum_element - mean[j]) * scale;
}
} else {
for (int j = 0; j < size; ++j) {
top_data[item_id * size + j] =
(datum.float_data(j) - mean[j]) * scale;
}
}
}
top_label[item_id] = datum.label();  // read this image's label
}
}
Example 4: img
//......... part of the code omitted here .........
CHECK_EQ(img_channels, data_mean_.channels());
if ((img_height == data_mean_.height() && img_width == data_mean_.width()) ||
(crop_size == data_mean_.height() && crop_size == data_mean_.width()))
{
mean = data_mean_.mutable_cpu_data();
}
else
{
CHECK_EQ(img_height, data_mean_.height());
CHECK_EQ(img_width, data_mean_.width());
}
}
if (has_mean_values) {
CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) <<
"Specify either 1 mean_value or as many as channels: " << img_channels;
if (img_channels > 1 && mean_values_.size() == 1) {
// Replicate the mean_value for simplicity
for (int c = 1; c < img_channels; ++c) {
mean_values_.push_back(mean_values_[0]);
}
}
}
//cv::Mat cv_cropped_img = cv_img;
if (crop_size) {
CHECK_EQ(crop_size, height);
CHECK_EQ(crop_size, width);
// We only do random crop when we do training.
if (phase_ == TRAIN) {
if (h_off == -1 && w_off == -1)
{
h_off = Rand(img_height - crop_size + 1);
w_off = Rand(img_width - crop_size + 1);
}
}
else {
if (h_off == -1 && w_off == -1)
{
h_off = (img_height - crop_size) / 2;
w_off = (img_width - crop_size) / 2;
}
}
//cv::Rect roi(w_off, h_off, crop_size, crop_size);
//cv_cropped_img = cv_img(roi);
}
else {
h_off = 0;
w_off = 0;
CHECK_EQ(img_height, height);
CHECK_EQ(img_width, width);
}
//CHECK(cv_cropped_img.data);
Dtype* transformed_data = transformed_blob->mutable_cpu_data();
int top_index;
// debug
/*char ss1[1010];
sprintf(ss1,"/home/xiaolonw/opt_flows/temp_results/sth.jpg");
cv::Mat img(Size(crop_size, crop_size), CV_8UC1);*/
for (int h = 0; h < height; ++h) {
int img_index = 0;
for (int w = 0; w < width; ++w) {
for (int c = 0; c < img_channels; ++c) {
float now_col = col_ranges[c];
if (do_mirror) {
top_index = (c * height + h) * width + (width - 1 - w);
} else {
top_index = (c * height + h) * width + w;
}
img_index = (c * img_height + h + h_off) * img_width + w + w_off;
Dtype pixel = datum.float_data(img_index);
// color augmentation
pixel = std::min( pixel * now_col, Dtype(255.0) );
// debug
//img.at<uchar>(h, w) = (uchar)(pixel);
if (has_mean_file) {
int mean_index = (c * img_height + h_off + h) * img_width + w_off + w;
if (crop_size == data_mean_.height() && crop_size == data_mean_.width() )
{
mean_index = (c * height + h) * width + w;
}
transformed_data[top_index] = (pixel - mean[mean_index]) * scale;
} else {
if (has_mean_values) {
transformed_data[top_index] =
(pixel - mean_values_[c]) * scale;
} else {
transformed_data[top_index] = pixel * scale;
}
}
}
}
}
//imwrite(ss1,img);
}
Example 5: main
int main(int argc, char** argv) {
::google::InitGoogleLogging(argv[0]);
std::ifstream infile(argv[1]);
std::vector<std::pair<string, int> > lines;
string filename;
int label;
while (infile >> filename >> label) {
lines.push_back(std::make_pair(filename, label));
}
LOG(INFO) << "A total of " << lines.size() << " images.";
Datum datum;
BlobProto sum_blob;
int count = 0;
if (!ReadImageToDatum(lines[0].first, lines[0].second,
resize_height, resize_width, is_color, &datum)) {
return -1;
}
sum_blob.set_num(1);
sum_blob.set_channels(datum.channels());
sum_blob.set_height(datum.height());
sum_blob.set_width(datum.width());
const int data_size = datum.channels() * datum.height() * datum.width();
int size_in_datum = std::max<int>(datum.data().size(),
datum.float_data_size());
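// size_in_datum equals the byte count for uint8-encoded images and
// float_data_size() for float-encoded features; both storage modes are
// accumulated into sum_blob below.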
for (int i = 0; i < size_in_datum; ++i) {
sum_blob.add_data(0.);
}
LOG(INFO) << "Starting Iteration";
for (int line_id = 0; line_id < lines.size(); ++line_id) {
if (!ReadImageToDatum(lines[line_id].first, lines[line_id].second,
resize_height, resize_width, is_color, &datum)) {
continue;
}
const string& data = datum.data();
size_in_datum = std::max<int>(datum.data().size(),
datum.float_data_size());
CHECK_EQ(size_in_datum, data_size) << "Incorrect data field size " <<
size_in_datum;
if (data.size() != 0) {
for (int i = 0; i < size_in_datum; ++i) {
sum_blob.set_data(i, sum_blob.data(i) + (uint8_t)data[i]);
}
} else {
for (int i = 0; i < size_in_datum; ++i) {
sum_blob.set_data(i, sum_blob.data(i) +
static_cast<float>(datum.float_data(i)));
}
}
++count;
}
for (int i = 0; i < sum_blob.data_size(); ++i) {
sum_blob.set_data(i, sum_blob.data(i) / count);
}
// Write to disk
LOG(INFO) << "Write to " << argv[2];
WriteProtoToBinaryFile(sum_blob, argv[2]);
return 0;
}
Example 6: PoseImageDataLayerPrefetch
//......... part of the code omitted here .........
if(color_aug)
{
thisRand = random(0.8,1.2);
}
for (int h = 0; h < new_height; ++h)
{
for (int w = 0; w < new_width; ++w)
{
int top_index = ((item_id * channels + c)
* new_height + h) * new_width + w;
int data_index = (c * height + h + h_off) * width
+ w + w_off;
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
top_data[top_index] = (datum_element
- mean[data_index]) * scale;
//img.at<cv::Vec3b>(h, w)[c] = (uchar)(datum_element * scale) * thisRand;
top_data[top_index] = min(top_data[top_index] * thisRand, (Dtype)(255.0));
}
}
}
//imwrite(ss1, img);
}
}
else
{
// Just copy the whole data
if (data.size())
{
for (int j = 0; j < size; ++j)
{
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[j]));
top_data[item_id * size + j] = (datum_element - mean[j])
* scale;
}
}
else
{
for (int j = 0; j < size; ++j)
{
top_data[item_id * size + j] = (datum.float_data(j)
- mean[j]) * scale;
}
}
}
float lblratio = static_cast<float>(new_height) / out_height;  // avoid integer division
vector<int> pts;
for (int label_i = 0; label_i < datum.label_size(); label_i++)
{
pts.push_back( datum.label(label_i) / lblratio );
}
int lblLen = key_point_num * out_height * out_width;
PoseReadLabel(pts, was, top_label + item_id * lblLen, out_height, out_width);
/*for(int ci = 0; ci < key_point_num; ci ++)
{
Mat img(Size(out_height, out_width), CV_8UC3);
sprintf(ss2,"/home/dragon123/cnncode/showimg/%d_%d_gt.jpg",item_id, ci);
for(int h = 0; h < out_height; h ++)
for(int w = 0; w < out_width; w ++)
{
int clr = top_label[item_id * lblLen + ci * out_height * out_width + h * out_width + w];
if(clr <= 0)
{
if(clr == 0) for(int c = 0; c < 3; c ++) img.at<cv::Vec3b>(h, w)[c] = 0;
if(clr < 0) for(int c = 0; c < 3; c ++) img.at<cv::Vec3b>(h, w)[c] = 128;
}
else
{
for(int c = 0; c < 3; c ++) img.at<cv::Vec3b>(h, w)[c] = 255;
}
}
imwrite(ss2, img);
}*/
// go to the next iter
layer->lines_id_++;
if (layer->lines_id_ >= lines_size)
{
// We have reached the end. Restart from the first.
DLOG(INFO) << "Restarting data prefetching from start.";
layer->lines_id_ = 0;
if (layer->layer_param_.pose_image_data_param().shuffle())
{
layer->ShuffleImages();
}
}
}
delete was;
return reinterpret_cast<void*>(NULL);
}
Example 7: CHECK
void DataLstmTrainHistLayer<Dtype>::InternalThreadEntry() {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;
double trans_time = 0;
CPUTimer timer;
CHECK(this->prefetch_data_.count());
Datum datum;
Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
Dtype* top_label = this->prefetch_label_.mutable_cpu_data();
Dtype* top_hist = this->prefetch_hist_.mutable_cpu_data();
Dtype* top_marker = this->prefetch_marker_.mutable_cpu_data();
// datum scales
const int size = resize_height*resize_width*3;
const Dtype* mean = this->data_mean_.mutable_cpu_data();
string value;
const int kMaxKeyLength = 256;
char key_cstr[kMaxKeyLength];
int key;
const int sequence_size = this->layer_param_.data_lstm_train_hist_param().sequence_size();
const int ind_seq_num=this->layer_param_.data_lstm_train_hist_param().sequence_num();
const int interval=this->layer_param_.data_lstm_train_hist_param().interval();
int item_id;
for (int time_id = 0; time_id < sequence_size; ++time_id) {
for (int seq_id = 0; seq_id < ind_seq_num; ++seq_id) {
item_id=time_id*ind_seq_num+seq_id;
timer.Start();
// get a blob
key=buffer_key[seq_id]; // MUST be changed according to the size of the training set
snprintf(key_cstr, kMaxKeyLength, "%08d", key);
db_->Get(leveldb::ReadOptions(), string(key_cstr), &value);
datum.ParseFromString(value);
const string& data = datum.data();
read_time += timer.MicroSeconds();
timer.Start();
for (int j = 0; j < size; ++j) {
Dtype datum_element = static_cast<Dtype>(static_cast<uint8_t>(data[j]));
top_data[item_id * size + j] = (datum_element - mean[j]);
}
for (int j = 0; j < para_dim; ++j) {
top_label[item_id * para_dim + j] = datum.float_data(j);
}
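// float_data layout in this dataset: indices [0, para_dim) hold the labels,
// index para_dim holds the sequence marker.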
top_marker[item_id] = datum.float_data(para_dim);
if (buffer_marker[seq_id] == 0) {
top_marker[item_id] = 0;
buffer_marker[seq_id] = 1;
}
//////////////////////////////////// for hist
if (top_marker[item_id] < 0.5) {
for (int j = 0; j < para_dim; ++j)
top_hist[item_id * para_dim + j] = 0;
} else {
if (time_id == 0) {
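// Seed the history: at time 0 it comes from the persistent hist_blob buffer;
// later time steps (the else branch below) reuse the previous step's labels.
// The per-dimension affine constants rescale each of the 14 values into a
// bounded range before they are fed back as history.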
top_hist[item_id * para_dim + 0] = hist_blob[seq_id * para_dim + 0]/1.1+0.5;
top_hist[item_id * para_dim + 1] = hist_blob[seq_id * para_dim + 1]*0.17778+1.34445;
top_hist[item_id * para_dim + 2] = hist_blob[seq_id * para_dim + 2]*0.14545+0.39091;
top_hist[item_id * para_dim + 3] = hist_blob[seq_id * para_dim + 3]*0.17778-0.34445;
top_hist[item_id * para_dim + 4] = hist_blob[seq_id * para_dim + 4]/95.0+0.12;
top_hist[item_id * para_dim + 5] = hist_blob[seq_id * para_dim + 5]/95.0+0.12;
top_hist[item_id * para_dim + 6] = hist_blob[seq_id * para_dim + 6]*0.14545+1.48181;
top_hist[item_id * para_dim + 7] = hist_blob[seq_id * para_dim + 7]*0.16+0.98;
top_hist[item_id * para_dim + 8] = hist_blob[seq_id * para_dim + 8]*0.16+0.02;
top_hist[item_id * para_dim + 9] = hist_blob[seq_id * para_dim + 9]*0.14545-0.48181;
top_hist[item_id * para_dim + 10] = hist_blob[seq_id * para_dim + 10]/95.0+0.12;
top_hist[item_id * para_dim + 11] = hist_blob[seq_id * para_dim + 11]/95.0+0.12;
top_hist[item_id * para_dim + 12] = hist_blob[seq_id * para_dim + 12]/95.0+0.12;
top_hist[item_id * para_dim + 13] = hist_blob[seq_id * para_dim + 13]*0.6+0.2;
} else {
int pre_id=(time_id-1)*ind_seq_num+seq_id;
top_hist[item_id * para_dim + 0] = top_label[pre_id * para_dim + 0]/1.1+0.5;
top_hist[item_id * para_dim + 1] = top_label[pre_id * para_dim + 1]*0.17778+1.34445;
top_hist[item_id * para_dim + 2] = top_label[pre_id * para_dim + 2]*0.14545+0.39091;
top_hist[item_id * para_dim + 3] = top_label[pre_id * para_dim + 3]*0.17778-0.34445;
top_hist[item_id * para_dim + 4] = top_label[pre_id * para_dim + 4]/95.0+0.12;
top_hist[item_id * para_dim + 5] = top_label[pre_id * para_dim + 5]/95.0+0.12;
top_hist[item_id * para_dim + 6] = top_label[pre_id * para_dim + 6]*0.14545+1.48181;
top_hist[item_id * para_dim + 7] = top_label[pre_id * para_dim + 7]*0.16+0.98;
top_hist[item_id * para_dim + 8] = top_label[pre_id * para_dim + 8]*0.16+0.02;
top_hist[item_id * para_dim + 9] = top_label[pre_id * para_dim + 9]*0.14545-0.48181;
top_hist[item_id * para_dim + 10] = top_label[pre_id * para_dim + 10]/95.0+0.12;
top_hist[item_id * para_dim + 11] = top_label[pre_id * para_dim + 11]/95.0+0.12;
top_hist[item_id * para_dim + 12] = top_label[pre_id * para_dim + 12]/95.0+0.12;
top_hist[item_id * para_dim + 13] = top_label[pre_id * para_dim + 13]*0.6+0.2;
}
}
//////////////////////////////////// for hist
//......... part of the code omitted here .........
Example 8: LOG
void DataTransformer<Dtype>::Transform(const int batch_item_id,
const Datum& datum,
const Dtype* mean,
Dtype* transformed_data) {
const string& data = datum.data();
const int channels = datum.channels();
const int height = datum.height();
const int width = datum.width();
const int size = datum.channels() * datum.height() * datum.width();
const int crop_size = param_.crop_size();
const bool mirror = param_.mirror();
const Dtype scale = param_.scale();
if (mirror && crop_size == 0) {
LOG(FATAL) << "Current implementation requires mirror and crop_size to be "
<< "set at the same time.";
}
if (crop_size) {
CHECK(data.size()) << "Image cropping only support uint8 data";
int h_off, w_off;
// We only do random crop when we do training.
if (phase_ == Caffe::TRAIN) {
h_off = Rand() % (height - crop_size);
w_off = Rand() % (width - crop_size);
} else {
h_off = (height - crop_size) / 2;
w_off = (width - crop_size) / 2;
}
if (mirror && Rand() % 2) {
// Copy mirrored version
for (int c = 0; c < channels; ++c) {
for (int h = 0; h < crop_size; ++h) {
for (int w = 0; w < crop_size; ++w) {
int data_index = (c * height + h + h_off) * width + w + w_off;
int top_index = ((batch_item_id * channels + c) * crop_size + h)
* crop_size + (crop_size - 1 - w);
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
transformed_data[top_index] =
(datum_element - mean[data_index]) * scale;
}
}
}
} else {
// Normal copy
for (int c = 0; c < channels; ++c) {
for (int h = 0; h < crop_size; ++h) {
for (int w = 0; w < crop_size; ++w) {
int top_index = ((batch_item_id * channels + c) * crop_size + h)
* crop_size + w;
int data_index = (c * height + h + h_off) * width + w + w_off;
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
transformed_data[top_index] =
(datum_element - mean[data_index]) * scale;
}
}
}
}
} else {
// we will prefer to use data() first, and then try float_data()
if (data.size()) {
for (int j = 0; j < size; ++j) {
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[j]));
transformed_data[j + batch_item_id * size] =
(datum_element - mean[j]) * scale;
}
} else {
for (int j = 0; j < size; ++j) {
transformed_data[j + batch_item_id * size] =
(datum.float_data(j) - mean[j]) * scale;
}
}
}
}
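A call-site sketch for this overload follows. Only the signature above is given, so the transformer construction, the TransformationParameter instance param, the mean buffer, and the output sizing are assumptions.
// Hypothetical call site; buffer sizes follow the Transform() signature above.
const int out_h = param.crop_size() ? param.crop_size() : datum.height();
const int out_w = param.crop_size() ? param.crop_size() : datum.width();
std::vector<float> mean(datum.channels() * datum.height() * datum.width(), 0.f);
std::vector<float> out(batch_size * datum.channels() * out_h * out_w);
transformer.Transform(item_id, datum, mean.data(), out.data());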
Example 9: if
void DataTransformer<Dtype>::Transform(const int batch_item_id,
const Datum& datum,
const Dtype* mean,
Dtype* transformed_data, Dtype* temp_data) {
const string& data = datum.data();
const int channels = datum.channels();
const int height = datum.height();
const int width = datum.width();
const int size = datum.channels() * datum.height() * datum.width();
const int crop_size = param_.crop_size();
const bool mirror = param_.mirror();
const bool rotate = param_.rotate();
const Dtype scale = param_.scale();
const int window_size = param_.window_size();
int newHeight = datum.height();
int newWidth = datum.width();
if(crop_size) {
newHeight = crop_size;
newWidth = crop_size;
} else if(window_size) {
newHeight = window_size;
newWidth = window_size;
}
if (window_size && crop_size) {
LOG(FATAL) << "Current implementation does not support window_size and crop_size to be "
<< "set at the same time.";
}
if (crop_size || window_size) {
CHECK(data.size()) << "Image cropping only support uint8 data";
int h_off, w_off;
// We only do random crop when we do training.
if (phase_ == Caffe::TRAIN && crop_size) {
h_off = Rand() % (height - crop_size);
w_off = Rand() % (width - crop_size);
} else {
h_off = (height - newHeight) / 2;
w_off = (width - newWidth) / 2;
}
// Normal copy
for (int c = 0; c < channels; ++c) {
for (int h = 0; h < newHeight; ++h) {
for (int w = 0; w < newWidth; ++w) {
int top_index = ((batch_item_id * channels + c) * newHeight + h)
* newWidth + w;
int data_index = (c * height + h + h_off) * width + w + w_off;
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
transformed_data[top_index] =
(datum_element - mean[data_index]) * scale;
}
}
}
} else {
//LOG(INFO) << ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Normal::" << batch_item_id;
// we will prefer to use data() first, and then try float_data()
if (data.size()) {
for (int j = 0; j < size; ++j) {
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[j]));
transformed_data[j + batch_item_id * size] =
(datum_element - mean[j]) * scale;
}
} else {
for (int j = 0; j < size; ++j) {
transformed_data[j + batch_item_id * size] =
(datum.float_data(j) - mean[j]) * scale;
}
}
}
//Perform mirroring on the transformed_data using a temp_data first then copy it back
if (mirror && Rand() % 3) {
// Copy mirrored version
if(Rand()%2){ //Mirror vertical
//LOG(INFO) << ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Mirror vertical::" << batch_item_id;
for (int c = 0; c < channels; ++c) {
for (int h = 0; h < newHeight; ++h) {
for (int w = 0; w < newWidth; ++w) {
int data_index = ((batch_item_id * channels + c) * newHeight + h) * newWidth + w;
int top_index = ((batch_item_id * channels + c) * newHeight + h)
* newWidth + (newWidth - 1 - w);
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(transformed_data[data_index]));
temp_data[top_index] = datum_element;
}
}
}
}else{ //Mirror horizontal
//LOG(INFO) << ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Mirror horizontal::" << batch_item_id;
for (int c = 0; c < channels; ++c) {
for (int h = 0; h < newHeight; ++h) {
for (int w = 0; w < newWidth; ++w) {
int data_index = ((batch_item_id * channels + c) * newHeight + h) * newWidth + w;
//......... part of the code omitted here .........
Example 10: in_stream
//......... part of the code omitted here .........
CHECK(mean_values_.size() == 1 || mean_values_.size() == datum_channels) <<
"Specify either 1 mean_value or as many as channels: " << datum_channels;
if (datum_channels > 1 && mean_values_.size() == 1) {
// Replicate the mean_value for simplicity
for (int c = 1; c < datum_channels; ++c) {
mean_values_.push_back(mean_values_[0]);
}
}
}
int height = datum_height;
int width = datum_width;
int h_off = 0;
int w_off = 0;
if (crop_size) {
height = crop_size;
width = crop_size;
// We only do random crop when we do training.
if (phase_ == TRAIN) {
h_off = Rand(datum_height - crop_size + 1);
w_off = Rand(datum_width - crop_size + 1);
} else {
h_off = (datum_height - crop_size) / 2;
w_off = (datum_width - crop_size) / 2;
}
}
//aki_update_start
//use the multiview strategy in testing
const bool use_multiview = param_.multi_view();
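// Standard 10-view testing: view_type values 1-5 select the four corner crops
// and the center crop; values 6-10 are the same five crops mirrored (below,
// view_type > 5 sets do_mirror and folds the value back into 1-5).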
if (use_multiview) {
std::ifstream in_stream(std::string("multiview_cache").c_str());
int view_type = 0;
in_stream >> view_type;
in_stream.close();
if (view_type > 5)
{
//it means we have to use mirror right here
do_mirror = true;
view_type-=5;
}
switch(view_type){
case 1:
h_off = 0;
w_off = 0;
break;
case 2:
h_off = 0;
w_off = datum_width - crop_size;
break;
case 3:
h_off = datum_height - crop_size;  // bottom-left crop
w_off = 0;
break;
case 4:
h_off = datum_height - crop_size;  // bottom-right crop
w_off = datum_width - crop_size;
break;
case 5:
h_off = (datum_height - crop_size) / 2;
w_off = (datum_width - crop_size) / 2;
break;
default:
break;
}
}
//aki_update_end
Dtype datum_element;
int top_index, data_index;
for (int c = 0; c < datum_channels; ++c) {
for (int h = 0; h < height; ++h) {
for (int w = 0; w < width; ++w) {
data_index = (c * datum_height + h_off + h) * datum_width + w_off + w;
if (do_mirror) {
top_index = (c * height + h) * width + (width - 1 - w);
} else {
top_index = (c * height + h) * width + w;
}
if (has_uint8) {
datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
} else {
datum_element = datum.float_data(data_index);
}
if (has_mean_file) {
transformed_data[top_index] =
(datum_element - mean[data_index]) * scale;
} else {
if (has_mean_values) {
transformed_data[top_index] =
(datum_element - mean_values_[c]) * scale;
} else {
transformed_data[top_index] = datum_element * scale;
}
}
}
}
}
}
Example 11: DataLayerPrefetch
void* DataLayerPrefetch(void* layer_pointer) {
CHECK(layer_pointer);
DataLayer<Dtype>* layer = static_cast<DataLayer<Dtype>*>(layer_pointer);
CHECK(layer);
Datum datum;
CHECK(layer->prefetch_data_);
Dtype* top_data = layer->prefetch_data_->mutable_cpu_data();
Dtype* top_label;
if (layer->output_labels_) {
top_label = layer->prefetch_label_->mutable_cpu_data();
}
const Dtype scale = layer->layer_param_.data_param().scale();
const int batch_size = layer->layer_param_.data_param().batch_size();
const int crop_size = layer->layer_param_.data_param().crop_size();
const bool mirror = layer->layer_param_.data_param().mirror();
if (mirror && crop_size == 0) {
LOG(FATAL) << "Current implementation requires mirror and crop_size to be "
<< "set at the same time.";
}
// datum scales
const int channels = layer->datum_channels_;
const int height = layer->datum_height_;
const int width = layer->datum_width_;
const int size = layer->datum_size_;
const Dtype* mean = layer->data_mean_.cpu_data();
for (int item_id = 0; item_id < batch_size; ++item_id) {
// get a blob
CHECK(layer->iter_);
CHECK(layer->iter_->Valid());
datum.ParseFromString(layer->iter_->value().ToString());
const string& data = datum.data();
if (crop_size) {
CHECK(data.size()) << "Image cropping only support uint8 data";
int h_off, w_off;
// We only do random crop when we do training.
if (layer->phase_ == Caffe::TRAIN) {
h_off = layer->PrefetchRand() % (height - crop_size);
w_off = layer->PrefetchRand() % (width - crop_size);
} else {
h_off = (height - crop_size) / 2;
w_off = (width - crop_size) / 2;
}
if (mirror && layer->PrefetchRand() % 2) {
// Copy mirrored version
for (int c = 0; c < channels; ++c) {
for (int h = 0; h < crop_size; ++h) {
for (int w = 0; w < crop_size; ++w) {
int top_index = ((item_id * channels + c) * crop_size + h)
* crop_size + (crop_size - 1 - w);
int data_index = (c * height + h + h_off) * width + w + w_off;
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
top_data[top_index] = (datum_element - mean[data_index]) * scale;
}
}
}
} else {
// Normal copy
for (int c = 0; c < channels; ++c) {
for (int h = 0; h < crop_size; ++h) {
for (int w = 0; w < crop_size; ++w) {
int top_index = ((item_id * channels + c) * crop_size + h)
* crop_size + w;
int data_index = (c * height + h + h_off) * width + w + w_off;
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
top_data[top_index] = (datum_element - mean[data_index]) * scale;
}
}
}
}
} else {
// we will prefer to use data() first, and then try float_data()
if (data.size()) {
for (int j = 0; j < size; ++j) {
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[j]));
top_data[item_id * size + j] = (datum_element - mean[j]) * scale;
}
} else {
for (int j = 0; j < size; ++j) {
top_data[item_id * size + j] =
(datum.float_data(j) - mean[j]) * scale;
}
}
}
if (layer->output_labels_) {
top_label[item_id] = datum.label();
}
// go to the next iter
layer->iter_->Next();
if (!layer->iter_->Valid()) {
// We have reached the end. Restart from the first.
DLOG(INFO) << "Restarting data prefetching from start.";
layer->iter_->SeekToFirst();
}
}
//......... part of the code omitted here .........
Example 12: CHECK
void DataDrivingLayer<Dtype>::InternalThreadEntry() {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;
double trans_time = 0;
CPUTimer timer;
CHECK(this->prefetch_data_.count());
Datum datum;
Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
Dtype* top_label = this->prefetch_label_.mutable_cpu_data();
// datum scales
const int size = resize_height*resize_width*3;
const Dtype* mean = this->data_mean_.mutable_cpu_data();
string value;
const int kMaxKeyLength = 256;
char key_cstr[kMaxKeyLength];
int key;
const int batch_size = this->layer_param_.data_driving_param().batch_size();
for (int item_id = 0; item_id < batch_size; ++item_id) {
timer.Start();
// get a blob
key=random(484815)+1; // MUST be changed according to the size of the training set
snprintf(key_cstr, kMaxKeyLength, "%08d", key);
db_->Get(leveldb::ReadOptions(), string(key_cstr), &value);
datum.ParseFromString(value);
const string& data = datum.data();
read_time += timer.MicroSeconds();
timer.Start();
for (int j = 0; j < size; ++j) {
Dtype datum_element = static_cast<Dtype>(static_cast<uint8_t>(data[j]));
top_data[item_id * size + j] = (datum_element - mean[j]);
}
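// float_data holds the para_dim real-valued regression targets for this frame.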
for (int j = 0; j < para_dim; ++j) {
top_label[item_id*para_dim+j] = datum.float_data(j);
}
trans_time += timer.MicroSeconds();
/*
for (int h = 0; h < resize_height; ++h) {
for (int w = 0; w < resize_width; ++w) {
leveldbTrain->imageData[(h*resize_width+w)*3+0]=(uint8_t)data[h*resize_width+w];
leveldbTrain->imageData[(h*resize_width+w)*3+1]=(uint8_t)data[resize_height*resize_width+h*resize_width+w];
leveldbTrain->imageData[(h*resize_width+w)*3+2]=(uint8_t)data[resize_height*resize_width*2+h*resize_width+w];
//leveldbTrain->imageData[(h*resize_width+w)*3+0]=(uint8_t)top_data[item_id * size+h*resize_width+w];
//leveldbTrain->imageData[(h*resize_width+w)*3+1]=(uint8_t)top_data[item_id * size+resize_height*resize_width+h*resize_width+w];
//leveldbTrain->imageData[(h*resize_width+w)*3+2]=(uint8_t)top_data[item_id * size+resize_height*resize_width*2+h*resize_width+w];
}
}
cvShowImage("Image from leveldb", leveldbTrain);
cvWaitKey( 1 );
*/
}
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example 13: main
int main(int argc, char** argv) {
::google::InitGoogleLogging(argv[0]);
if (argc < 5) {
LOG(ERROR) << "Usage: compute_image_mean input_list new_height new_width output_file [dropping_rate]";
return 1;
}
char* fn_list = argv[1];
const int height = atoi(argv[2]);
const int width = atoi(argv[3]);
char* fn_output = argv[4];
int sampling_rate = 1;
if (argc >= 6){
sampling_rate = atoi(argv[5]);
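// e.g., a sampling rate of 10 averages only every 10th image in the list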
LOG(INFO) << "using sampling rate " << sampling_rate;
}
Datum datum;
BlobProto sum_blob;
int count = 0;
std::ifstream infile(fn_list);
string fn_frm;
int label;
infile >> fn_frm >> label;
ReadImageToDatum(fn_frm, label, height, width, &datum);
sum_blob.set_num(1);
sum_blob.set_channels(datum.channels());
sum_blob.set_length(1);
sum_blob.set_height(datum.height());
sum_blob.set_width(datum.width());
const int data_size = datum.channels() * datum.height() * datum.width();
int size_in_datum = std::max<int>(datum.data().size(),
datum.float_data_size());
for (int i = 0; i < size_in_datum; ++i) {
sum_blob.add_data(0.);
}
LOG(INFO) << "Starting Iteration";
int i = 0;
while (infile >> fn_frm >> label) {
i++;
if (i % sampling_rate!=0){
continue;
}
ReadImageToDatum(fn_frm, label, height, width, &datum);
const string& data = datum.data();
size_in_datum = std::max<int>(datum.data().size(), datum.float_data_size());
CHECK_EQ(size_in_datum, data_size) << "Incorrect data field size " <<
size_in_datum;
if (data.size() != 0) {
for (int i = 0; i < size_in_datum; ++i) {
sum_blob.set_data(i, sum_blob.data(i) + (uint8_t)data[i]);
}
} else {
for (int i = 0; i < size_in_datum; ++i) {
sum_blob.set_data(i, sum_blob.data(i) +
static_cast<float>(datum.float_data(i)));
}
}
++count;
if (count % 10000 == 0) {
LOG(ERROR) << "Processed " << count << " files.";
}
}
infile.close();
if (count % 10000 != 0) {
LOG(ERROR) << "Processed " << count << " files.";
}
for (int i = 0; i < sum_blob.data_size(); ++i) {
sum_blob.set_data(i, sum_blob.data(i) / count);
}
// Write to disk
LOG(INFO) << "Write to " << fn_output;
WriteProtoToBinaryFile(sum_blob, fn_output);
return 0;
}
Example 14: main
int main(int argc, char** argv) {
::google::InitGoogleLogging(argv[0]);
if (argc < 3 || argc > 4) {
LOG(ERROR) << "Usage: compute_image_mean input_db output_file"
<< " db_backend[leveldb or lmdb]";
return 1;
}
string db_backend = "lmdb";
if (argc == 4) {
db_backend = string(argv[3]);
}
// Open leveldb
leveldb::DB* db;
leveldb::Options options;
options.create_if_missing = false;
leveldb::Iterator* it = NULL;
// lmdb
MDB_env* mdb_env;
MDB_dbi mdb_dbi;
MDB_val mdb_key, mdb_value;
MDB_txn* mdb_txn;
MDB_cursor* mdb_cursor;
// Open db
if (db_backend == "leveldb") { // leveldb
LOG(INFO) << "Opening leveldb " << argv[1];
leveldb::Status status = leveldb::DB::Open(
options, argv[1], &db);
CHECK(status.ok()) << "Failed to open leveldb " << argv[1];
leveldb::ReadOptions read_options;
read_options.fill_cache = false;
it = db->NewIterator(read_options);
it->SeekToFirst();
} else if (db_backend == "lmdb") { // lmdb
LOG(INFO) << "Opening lmdb " << argv[1];
CHECK_EQ(mdb_env_create(&mdb_env), MDB_SUCCESS) << "mdb_env_create failed";
CHECK_EQ(mdb_env_set_mapsize(mdb_env, 1099511627776), MDB_SUCCESS); // 1TB
CHECK_EQ(mdb_env_open(mdb_env, argv[1], MDB_RDONLY, 0664),
MDB_SUCCESS) << "mdb_env_open failed";
CHECK_EQ(mdb_txn_begin(mdb_env, NULL, MDB_RDONLY, &mdb_txn), MDB_SUCCESS)
<< "mdb_txn_begin failed";
CHECK_EQ(mdb_open(mdb_txn, NULL, 0, &mdb_dbi), MDB_SUCCESS)
<< "mdb_open failed";
CHECK_EQ(mdb_cursor_open(mdb_txn, mdb_dbi, &mdb_cursor), MDB_SUCCESS)
<< "mdb_cursor_open failed";
CHECK_EQ(mdb_cursor_get(mdb_cursor, &mdb_key, &mdb_value, MDB_FIRST),
MDB_SUCCESS);
} else {
LOG(FATAL) << "Unknown db backend " << db_backend;
}
// set size info
Datum datum;
BlobProto sum_blob;
int count = 0;
// load first datum
if (db_backend == "leveldb") {
datum.ParseFromString(it->value().ToString());
} else if (db_backend == "lmdb") {
datum.ParseFromArray(mdb_value.mv_data, mdb_value.mv_size);
} else {
LOG(FATAL) << "Unknown db backend " << db_backend;
}
sum_blob.set_num(1);
sum_blob.set_channels(datum.channels());
sum_blob.set_height(datum.height());
sum_blob.set_width(datum.width());
const int data_size = datum.channels() * datum.height() * datum.width();
int size_in_datum = std::max<int>(datum.data().size(),
datum.float_data_size());
for (int i = 0; i < size_in_datum; ++i) {
sum_blob.add_data(0.);
}
// start collecting
LOG(INFO) << "Starting Iteration";
if (db_backend == "leveldb") { // leveldb
for (it->SeekToFirst(); it->Valid(); it->Next()) {
// just a dummy operation
datum.ParseFromString(it->value().ToString());
const string& data = datum.data();
size_in_datum = std::max<int>(datum.data().size(),
datum.float_data_size());
CHECK_EQ(size_in_datum, data_size) << "Incorrect data field size " <<
size_in_datum;
if (data.size() != 0) {
for (int i = 0; i < size_in_datum; ++i) {
sum_blob.set_data(i, sum_blob.data(i) + (uint8_t)data[i]);
}
} else {
for (int i = 0; i < size_in_datum; ++i) {
sum_blob.set_data(i, sum_blob.data(i) +
static_cast<float>(datum.float_data(i)));
}
}
++count;
if (count % 10000 == 0) {
//......... part of the code omitted here .........
Example 15: DataLayerPrefetch
void* DataLayerPrefetch(void* layer_pointer) {
CHECK(layer_pointer);
DataLayer<Dtype>* layer = reinterpret_cast<DataLayer<Dtype>*>(layer_pointer);
CHECK(layer);
Datum datum;
CHECK(layer->prefetch_data_);
Dtype* top_data = layer->prefetch_data_->mutable_cpu_data();
Dtype* top_label = layer->prefetch_label_->mutable_cpu_data();
const Dtype scale = layer->layer_param_.scale();
const int batchsize = layer->layer_param_.batchsize();
const int cropsize = layer->layer_param_.cropsize();
const bool mirror = layer->layer_param_.mirror();
if (mirror && cropsize == 0) {
LOG(FATAL) << "Current implementation requires mirror and cropsize to be "
<< "set at the same time.";
}
// datum scales
const int channels = layer->datum_channels_;
const int height = layer->datum_height_;
const int width = layer->datum_width_;
const int size = layer->datum_size_;
const Dtype* mean = layer->data_mean_.cpu_data();
for (int itemid = 0; itemid < batchsize; ++itemid) {
// get a blob
CHECK(layer->iter_);
CHECK(layer->iter_->Valid());
datum.ParseFromString(layer->iter_->value().ToString());
const string& data = datum.data();
if (cropsize) {
//CHECK(data.size()) << "Image cropping only support uint8 data";
int h_off, w_off;
// We only do random crop when we do training.
if (Caffe::phase() == Caffe::TRAIN) {
// NOLINT_NEXT_LINE(runtime/threadsafe_fn)
h_off = rand() % (height - cropsize);
// NOLINT_NEXT_LINE(runtime/threadsafe_fn)
w_off = rand() % (width - cropsize);
} else {
h_off = (height - cropsize) / 2;
w_off = (width - cropsize) / 2;
}
// NOLINT_NEXT_LINE(runtime/threadsafe_fn)
if (mirror && rand() % 2) {
// Copy mirrored version
for (int c = 0; c < channels; ++c) {
for (int h = 0; h < cropsize; ++h) {
for (int w = 0; w < cropsize; ++w) {
top_data[((itemid * channels + c) * cropsize + h) * cropsize
+ cropsize - 1 - w] =
(static_cast<Dtype>(
(float)datum.float_data((c * height + h + h_off) * width
+ w + w_off))
- mean[(c * height + h + h_off) * width + w + w_off])
* scale;
}
}
}
} else {
// Normal copy
for (int c = 0; c < channels; ++c) {
for (int h = 0; h < cropsize; ++h) {
for (int w = 0; w < cropsize; ++w) {
top_data[((itemid * channels + c) * cropsize + h) * cropsize + w]
= (static_cast<Dtype>(
(float)datum.float_data((c * height + h + h_off) * width
+ w + w_off))
- mean[(c * height + h + h_off) * width + w + w_off])
* scale;
}
}
}
}
} else {
// we will prefer to use data() first, and then try float_data()
if (data.size()) {
//cout << "unint8 data!!!!" << endl;
for (int j = 0; j < size; ++j) {
//cout << "datum.int_data " << j << "of size: " << size << static_cast<Dtype>((uint8_t)data[j]) << " mean: " << mean[j] << endl;
top_data[itemid * size + j] =
(static_cast<Dtype>((uint8_t)data[j]) - mean[j]) * scale;
}
} else {
//cout << "float data !!!!!!!!!!!" << endl;
for (int j = 0; j < size; ++j) {
//cout << "item: " << itemid <<" datum.float_data " << j << "of size: " << size << endl;
//cout << datum.float_data(j) << " mean: " << mean[j] << endl;
top_data[itemid * size + j] =
(datum.float_data(j) - mean[j]) * scale;
}
}
}
top_label[itemid] = datum.label();
// go to the next iter
layer->iter_->Next();
if (!layer->iter_->Valid()) {
//......... part of the code omitted here .........