本文整理汇总了C++中Datum::label_size方法的典型用法代码示例。如果您正苦于以下问题:C++ Datum::label_size方法的具体用法?C++ Datum::label_size怎么用?C++ Datum::label_size使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Datum的用法示例。
在下文中一共展示了Datum::label_size方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: CHECK
void DataLayer<Dtype>::InternalThreadEntry() {
  // Prefetch thread body: fills prefetch_data_ (and prefetch_label_ when
  // labels are produced) with one batch read from the configured DB backend
  // (LevelDB or LMDB), applying the layer's data transformations per item.
  Datum datum;
  CHECK(this->prefetch_data_.count());
  Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
  Dtype* top_label = NULL;  // suppress warnings about uninitialized variables
  if (this->output_labels_) {
    top_label = this->prefetch_label_.mutable_cpu_data();
  }
  const int batch_size = this->layer_param_.data_param().batch_size();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // Get a blob: parse the current DB record into `datum`.
    switch (this->layer_param_.data_param().backend()) {
    case DataParameter_DB_LEVELDB:
      CHECK(iter_);
      CHECK(iter_->Valid());
      datum.ParseFromString(iter_->value().ToString());
      break;
    case DataParameter_DB_LMDB:
      CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_,
              &mdb_value_, MDB_GET_CURRENT), MDB_SUCCESS);
      datum.ParseFromArray(mdb_value_.mv_data,
          mdb_value_.mv_size);
      break;
    default:
      LOG(FATAL) << "Unknown database backend";
    }
    // Apply data transformations (mirror, scale, crop...)
    this->data_transformer_.Transform(item_id, datum, this->mean_, top_data);
    if (this->output_labels_) {
      // Multi-label variant: instead of a single scalar label
      // (top_label[item_id] = datum.label()), copy every label entry for
      // this item, laid out as [item_id][label_index].
      // Debug aid (kept for reference, no trailing backslash so the comment
      // cannot silently swallow the following line):
      // LOG(ERROR) << "label size " << datum.label_size() << " " << datum.label(0)
      //            << " " << datum.label(1) << " " << datum.label(2) << " " << datum.label(3);
      const int label_size = datum.label_size();  // invariant per datum; hoisted out of the loop
      for (int label_i = 0; label_i < label_size; ++label_i) {
        top_label[item_id * label_size + label_i] = datum.label(label_i);
      }
    }
    // Advance to the next record; wrap around to the first record when the
    // end of the database is reached.
    switch (this->layer_param_.data_param().backend()) {
    case DataParameter_DB_LEVELDB:
      iter_->Next();
      if (!iter_->Valid()) {
        // We have reached the end. Restart from the first.
        DLOG(INFO) << "Restarting data prefetching from start.";
        iter_->SeekToFirst();
      }
      break;
    case DataParameter_DB_LMDB:
      if (mdb_cursor_get(mdb_cursor_, &mdb_key_,
              &mdb_value_, MDB_NEXT) != MDB_SUCCESS) {
        // We have reached the end. Restart from the first.
        DLOG(INFO) << "Restarting data prefetching from start.";
        CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_,
                &mdb_value_, MDB_FIRST), MDB_SUCCESS);
      }
      break;
    default:
      LOG(FATAL) << "Unknown database backend";
    }
  }
}
示例2: switch
//.........这里部分代码省略.........
options, this->layer_param_.data_param().source(), &db_temp);
CHECK(status.ok()) << "Failed to open leveldb "
<< this->layer_param_.data_param().source() << std::endl
<< status.ToString();
db_.reset(db_temp);
iter_.reset(db_->NewIterator(leveldb::ReadOptions()));
iter_->SeekToFirst();
idx_ = 0;
}
break;
case DataParameter_DB_LMDB:
CHECK_EQ(mdb_env_create(&mdb_env_), MDB_SUCCESS) << "mdb_env_create failed";
CHECK_EQ(mdb_env_set_mapsize(mdb_env_, 1099511627776), MDB_SUCCESS); // 1TB
CHECK_EQ(mdb_env_open(mdb_env_,
this->layer_param_.data_param().source().c_str(),
MDB_RDONLY|MDB_NOTLS, 0664), MDB_SUCCESS) << "mdb_env_open failed";
CHECK_EQ(mdb_txn_begin(mdb_env_, NULL, MDB_RDONLY, &mdb_txn_), MDB_SUCCESS)
<< "mdb_txn_begin failed";
CHECK_EQ(mdb_open(mdb_txn_, NULL, 0, &mdb_dbi_), MDB_SUCCESS)
<< "mdb_open failed";
CHECK_EQ(mdb_cursor_open(mdb_txn_, mdb_dbi_, &mdb_cursor_), MDB_SUCCESS)
<< "mdb_cursor_open failed";
LOG(INFO) << "Opening lmdb " << this->layer_param_.data_param().source();
CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_FIRST),
MDB_SUCCESS) << "mdb_cursor_get failed";
break;
default:
LOG(FATAL) << "Unknown database backend";
}
// Check if we would need to randomly skip a few data points
if (this->layer_param_.data_param().rand_skip()) {
unsigned int skip = caffe_rng_rand() %
this->layer_param_.data_param().rand_skip();
LOG(INFO) << "Skipping first " << skip << " data points.";
while (skip-- > 0) {
switch (this->layer_param_.data_param().backend()) {
case DataParameter_DB_LEVELDB:
iter_->Next();
idx_++;
if (!iter_->Valid()) {
iter_->SeekToFirst();
idx_ = 0;
}
break;
case DataParameter_DB_LMDB:
if (mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_NEXT)
!= MDB_SUCCESS) {
CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_,
MDB_FIRST), MDB_SUCCESS);
}
break;
default:
LOG(FATAL) << "Unknown database backend";
}
}
}
// Read a data point, and use it to initialize the top blob.
Datum datum;
switch (this->layer_param_.data_param().backend()) {
case DataParameter_DB_LEVELDB:
datum.ParseFromString(iter_->value().ToString());
//LOG(INFO)<<idx_;
break;
case DataParameter_DB_LMDB:
datum.ParseFromArray(mdb_value_.mv_data, mdb_value_.mv_size);
break;
default:
LOG(FATAL) << "Unknown database backend";
}
// image
int crop_size = this->layer_param_.transform_param().crop_size();
if (crop_size > 0) {
(*top)[0]->Reshape(this->layer_param_.data_param().batch_size(),
datum.channels(), crop_size, crop_size);
this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(),
datum.channels(), crop_size, crop_size);
} else {
(*top)[0]->Reshape(
this->layer_param_.data_param().batch_size(), datum.channels(),
datum.height(), datum.width());
this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(),
datum.channels(), datum.height(), datum.width());
}
LOG(INFO) << "output data size: " << (*top)[0]->num() << ","
<< (*top)[0]->channels() << "," << (*top)[0]->height() << ","
<< (*top)[0]->width();
// label
if (this->output_labels_) {
(*top)[1]->Reshape(this->layer_param_.data_param().batch_size(), datum.label_size(), 1, 1);
this->prefetch_label_.Reshape(this->layer_param_.data_param().batch_size(),
datum.label_size(), 1, 1);
}
// datum size
this->datum_channels_ = datum.channels();
this->datum_height_ = datum.height();
this->datum_width_ = datum.width();
this->datum_size_ = datum.channels() * datum.height() * datum.width();
}
示例3: PoseImageDataLayerPrefetch
//.........这里部分代码省略.........
if(color_aug)
{
thisRand = random(0.8,1.2);
}
for (int h = 0; h < new_height; ++h)
{
for (int w = 0; w < new_width; ++w)
{
int top_index = ((item_id * channels + c)
* new_height + h) * new_width + w;
int data_index = (c * height + h + h_off) * width
+ w + w_off;
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
top_data[top_index] = (datum_element
- mean[data_index]) * scale;
//img.at<cv::Vec3b>(h, w)[c] = (uchar)(datum_element * scale) * thisRand;
top_data[top_index] = min(top_data[top_index] * thisRand, (Dtype)(255.0));
}
}
}
//imwrite(ss1, img);
}
}
else
{
// Just copy the whole data
if (data.size())
{
for (int j = 0; j < size; ++j)
{
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[j]));
top_data[item_id * size + j] = (datum_element - mean[j])
* scale;
}
}
else
{
for (int j = 0; j < size; ++j)
{
top_data[item_id * size + j] = (datum.float_data(j)
- mean[j]) * scale;
}
}
}
float lblratio = new_height / out_height;
vector<int> pts;
for (int label_i = 0; label_i < datum.label_size(); label_i++)
{
pts.push_back( datum.label(label_i) / lblratio );
}
int lblLen = key_point_num * out_height * out_width;
PoseReadLabel(pts, was, top_label + item_id * lblLen, out_height, out_width);
/*for(int ci = 0; ci < key_point_num; ci ++)
{
Mat img(Size(out_height, out_width), CV_8UC3);
sprintf(ss2,"/home/dragon123/cnncode/showimg/%d_%d_gt.jpg",item_id, ci);
for(int h = 0; h < out_height; h ++)
for(int w = 0; w < out_width; w ++)
{
int clr = top_label[item_id * lblLen + ci * out_height * out_width + h * out_width + w];
if(clr <= 0)
{
if(clr == 0) for(int c = 0; c < 3; c ++) img.at<cv::Vec3b>(h, w)[c] = 0;
if(clr < 0) for(int c = 0; c < 3; c ++) img.at<cv::Vec3b>(h, w)[c] = 128;
}
else
{
for(int c = 0; c < 3; c ++) img.at<cv::Vec3b>(h, w)[c] = 255;
}
}
imwrite(ss2, img);
}*/
// go to the next iter
layer->lines_id_++;
if (layer->lines_id_ >= lines_size)
{
// We have reached the end. Restart from the first.
DLOG(INFO) << "Restarting data prefetching from start.";
layer->lines_id_ = 0;
if (layer->layer_param_.pose_image_data_param().shuffle())
{
layer->ShuffleImages();
}
}
}
delete was;
return reinterpret_cast<void*>(NULL);
}
示例4: ImageDataLayerPrefetch
//.........这里部分代码省略.........
int h_off, w_off;
// We only do random crop when we do training.
if (false && layer->phase_ == Caffe::TRAIN)
{
h_off = layer->PrefetchRand() % (height - crop_size);
w_off = layer->PrefetchRand() % (width - crop_size);
}
else
{
h_off = (height - crop_size) / 2;
w_off = (width - crop_size) / 2;
}
if (mirror && layer->PrefetchRand() % 2)
{
// Copy mirrored version
for (int c = 0; c < channels; ++c)
{
for (int h = 0; h < crop_size; ++h)
{
for (int w = 0; w < crop_size; ++w)
{
int top_index = ((item_id * channels + c)
* crop_size + h) * crop_size
+ (crop_size - 1 - w);
int data_index = (c * height + h + h_off) * width
+ w + w_off;
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
top_data[top_index] = (datum_element
- mean[data_index]) * scale;
}
}
}
}
else
{
// Normal copy
for (int c = 0; c < channels; ++c)
{
for (int h = 0; h < crop_size; ++h)
{
for (int w = 0; w < crop_size; ++w)
{
int top_index = ((item_id * channels + c)
* crop_size + h) * crop_size + w;
int data_index = (c * height + h + h_off) * width
+ w + w_off;
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
top_data[top_index] = (datum_element
- mean[data_index]) * scale;
}
}
}
}
}
else
{
// Just copy the whole data
if (data.size())
{
for (int j = 0; j < size; ++j)
{
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[j]));
top_data[item_id * size + j] = (datum_element - mean[j])
* scale;
}
}
else
{
for (int j = 0; j < size; ++j)
{
top_data[item_id * size + j] = (datum.float_data(j)
- mean[j]) * scale;
}
}
}
//top_label[item_id] = datum.label();
for (int label_i = 0; label_i < datum.label_size(); label_i++)
{
top_label[item_id * datum.label_size() + label_i] = datum.label(label_i);
}
// go to the next iter
layer->lines_id_++;
if (layer->lines_id_ >= lines_size)
{
// We have reached the end. Restart from the first.
DLOG(INFO) << "Restarting data prefetching from start.";
layer->lines_id_ = 0;
if (layer->layer_param_.image_data_param().shuffle())
{
layer->ShuffleImages();
}
}
}
return reinterpret_cast<void*>(NULL);
}
示例5: DataLayerPrefetch
void* DataLayerPrefetch(void* layer_pointer) {
CHECK(layer_pointer);
DataLayer<Dtype>* layer = static_cast<DataLayer<Dtype>*>(layer_pointer);
CHECK(layer);
Datum datum;
CHECK(layer->prefetch_data_);
Dtype* top_data = layer->prefetch_data_->mutable_cpu_data(); //数据
Dtype* top_label; //标签
if (layer->output_labels_) {
top_label = layer->prefetch_label_->mutable_cpu_data();
}
const Dtype scale = layer->layer_param_.data_param().scale();
const int batch_size = layer->layer_param_.data_param().batch_size();
const int crop_size = layer->layer_param_.data_param().crop_size();
const bool mirror = layer->layer_param_.data_param().mirror();
if (mirror && crop_size == 0) {//当前实现需要同时设置mirror和cropsize
LOG(FATAL) << "Current implementation requires mirror and crop_size to be "
<< "set at the same time.";
}
// datum scales
const int channels = layer->datum_channels_;
const int height = layer->datum_height_;
const int width = layer->datum_width_;
const int size = layer->datum_size_;
const Dtype* mean = layer->data_mean_.cpu_data();
for (int item_id = 0; item_id < batch_size; ++item_id) {
//每一批数据的数量是batchsize,一个循环拉取一张
// get a blob
CHECK(layer->iter_);
CHECK(layer->iter_->Valid());
datum.ParseFromString(layer->iter_->value().ToString());//利用迭代器拉取下一批数据
const string& data = datum.data();
int label_blob_channels = layer->prefetch_label_->channels();
int label_data_dim = datum.label_size();
CHECK_EQ(layer->prefetch_label_->channels(), datum.label_size()) << "label size is NOT the same.";
if (crop_size) {//如果需要裁剪
CHECK(data.size()) << "Image cropping only support uint8 data";
int h_off, w_off;
// We only do random crop when we do training.
//只是在训练阶段做随机裁剪
if (layer->phase_ == Caffe::TRAIN) {
h_off = layer->PrefetchRand() % (height - crop_size);
w_off = layer->PrefetchRand() % (width - crop_size);
} else {//测试阶段固定裁剪
h_off = (height - crop_size) / 2;
w_off = (width - crop_size) / 2;
}
//怎么感觉下面两种情况的代码是一样的?
if (mirror && layer->PrefetchRand() % 2) {
// Copy mirrored version
for (int c = 0; c < channels; ++c) {
for (int h = 0; h < crop_size; ++h) {
for (int w = 0; w < crop_size; ++w) {
int top_index = ((item_id * channels + c) * crop_size + h)
* crop_size + (crop_size - 1 - w);
int data_index = (c * height + h + h_off) * width + w + w_off;
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
top_data[top_index] = (datum_element - mean[data_index]) * scale;
}
}
}
} else {//如果不需要裁剪
// Normal copy
//我们优先考虑data(),然后float_data()
for (int c = 0; c < channels; ++c) {
for (int h = 0; h < crop_size; ++h) {
for (int w = 0; w < crop_size; ++w) {
int top_index = ((item_id * channels + c) * crop_size + h)
* crop_size + w;
int data_index = (c * height + h + h_off) * width + w + w_off;
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[data_index]));
top_data[top_index] = (datum_element - mean[data_index]) * scale;
}
}
}
}
} else {
// we will prefer to use data() first, and then try float_data()
if (data.size()) {
for (int j = 0; j < size; ++j) {
Dtype datum_element =
static_cast<Dtype>(static_cast<uint8_t>(data[j]));
top_data[item_id * size + j] = (datum_element - mean[j]) * scale;
}
} else {
for (int j = 0; j < size; ++j) {
top_data[item_id * size + j] =
(datum.float_data(j) - mean[j]) * scale;
}
}
}
//.........这里部分代码省略.........
示例6:
void DataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
CHECK_EQ(bottom.size(), 0) << "Data Layer takes no input blobs.";
CHECK_GE(top->size(), 1) << "Data Layer takes at least one blob as output.";
CHECK_LE(top->size(), 2) << "Data Layer takes at most two blobs as output.";
if (top->size() == 1) {
output_labels_ = false;
} else {
output_labels_ = true;
}
// Initialize the leveldb
leveldb::DB* db_temp;
leveldb::Options options;
options.create_if_missing = false;
options.max_open_files = 100;
LOG(INFO) << "Opening leveldb " << this->layer_param_.data_param().source();
leveldb::Status status = leveldb::DB::Open(
options, this->layer_param_.data_param().source(), &db_temp);
CHECK(status.ok()) << "Failed to open leveldb "
<< this->layer_param_.data_param().source() << std::endl
<< status.ToString();
db_.reset(db_temp);
iter_.reset(db_->NewIterator(leveldb::ReadOptions()));//通过迭代器来操纵leveldb
iter_->SeekToFirst();
// Check if we would need to randomly skip a few data points
//是否要随机跳过一些数据
if (this->layer_param_.data_param().rand_skip()) {
unsigned int skip = caffe_rng_rand() %
this->layer_param_.data_param().rand_skip();
LOG(INFO) << "Skipping first " << skip << " data points.";
while (skip-- > 0) {
iter_->Next();
if (!iter_->Valid()) {
iter_->SeekToFirst();
}
}
}
// Read a data point, and use it to initialize the top blob.
//读取一个数据点,用来初始化topblob。所谓初始化,只要是指reshape。
//可以观察到下面iter_调用调用next。所以这次读取只是用来读取出来channels等参数的,不作处理。
Datum datum;
datum.ParseFromString(iter_->value().ToString());//利用迭代器读取第一个数据点
// image图像数据
int crop_size = this->layer_param_.data_param().crop_size();//裁剪大小
if (crop_size > 0) {//需要裁剪
(*top)[0]->Reshape(this->layer_param_.data_param().batch_size(),
datum.channels(), crop_size, crop_size);
prefetch_data_.reset(new Blob<Dtype>(
this->layer_param_.data_param().batch_size(), datum.channels(),
crop_size, crop_size));
} else {//不需要裁剪
(*top)[0]->Reshape(
this->layer_param_.data_param().batch_size(), datum.channels(),
datum.height(), datum.width());
prefetch_data_.reset(new Blob<Dtype>(
this->layer_param_.data_param().batch_size(), datum.channels(),
datum.height(), datum.width()));
}
LOG(INFO) << "output data size: " << (*top)[0]->num() << ","
<< (*top)[0]->channels() << "," << (*top)[0]->height() << ","
<< (*top)[0]->width();
/*
// label标签数据
if (output_labels_) {
(*top)[1]->Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1);
prefetch_label_.reset(
new Blob<Dtype>(this->layer_param_.data_param().batch_size(), 1, 1, 1));
}
*/
// label标签数据
if (output_labels_) {
(*top)[1]->Reshape(this->layer_param_.data_param().batch_size(), datum.label_size(), 1, 1);
prefetch_label_.reset(
new Blob<Dtype>(this->layer_param_.data_param().batch_size(), datum.label_size(), 1, 1));
}
// datum size
datum_channels_ = datum.channels();
datum_height_ = datum.height();
datum_width_ = datum.width();
datum_size_ = datum.channels() * datum.height() * datum.width();
CHECK_GT(datum_height_, crop_size);
CHECK_GT(datum_width_, crop_size);
// check if we want to have mean 是否要减去均值
if (this->layer_param_.data_param().has_mean_file()) {
const string& mean_file = this->layer_param_.data_param().mean_file();
LOG(INFO) << "Loading mean file from" << mean_file;
BlobProto blob_proto;
//.........这里部分代码省略.........